Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright (C) 2017 Intel Corporation. All rights reserved.
3 : : * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4 : : * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : : */
6 : :
7 : : #include "spdk_internal/cunit.h"
8 : :
9 : : #include "common/lib/ut_multithread.c"
10 : : #include "unit/lib/json_mock.c"
11 : :
12 : : #include "spdk/config.h"
13 : : /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
14 : : #undef SPDK_CONFIG_VTUNE
15 : :
16 : : #include "bdev/bdev.c"
17 : :
/* Stubs for notify/memory-domain/accel symbols the bdev layer references but
 * whose behavior is irrelevant to these tests. */
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
DEFINE_STUB(spdk_accel_append_copy, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
	     uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
	     void *src_domain_ctx, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);

/* Set when the memory-domain pull/push mocks below are invoked. */
static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;
/* Address used as the io_device key for the fake accel channel. */
static int g_accel_io_device;
38 : :
DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
/* Mock pull: records the call, optionally returns a queued mock errno
 * (HANDLE_RETURN_MOCK returns early in that case), otherwise completes
 * the transfer immediately with status 0. */
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}
50 : :
DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
/* Mock push: mirror image of the pull mock above. */
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}
62 : :
/* Hand out a channel for the fake accel io_device registered in ut_bdev_setup(). */
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_accel_io_device);
}
68 : :
/* Shared state observed by test callbacks. */
int g_status;				/* last completion status */
int g_count;				/* generic call counter */
enum spdk_bdev_event_type g_event_type1;	/* last event seen by bdev_open_cb1 */
enum spdk_bdev_event_type g_event_type2;	/* last event seen by bdev_open_cb2 */
enum spdk_bdev_event_type g_event_type3;	/* last event seen by bdev_open_cb3 */
enum spdk_bdev_event_type g_event_type4;	/* last event seen by bdev_open_cb4 */
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;			/* cb_arg captured by bdev_unregister_cb */
int g_unregister_rc;			/* rc captured by bdev_unregister_cb */
78 : :
/* No-op stub; SCSI/NVMe sense translation is not under test here. */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
84 : :
/* Channel-create callback for the fake accel io_device; no per-channel state. */
static int
ut_accel_ch_create_cb(void *io_device, void *ctx)
{
	return 0;
}
90 : :
/* Channel-destroy callback for the fake accel io_device; nothing to tear down. */
static void
ut_accel_ch_destroy_cb(void *io_device, void *ctx)
{
}
95 : :
/* CUnit suite setup: register the fake accel io_device used by
 * spdk_accel_get_io_channel(). */
static int
ut_bdev_setup(void)
{
	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
				ut_accel_ch_destroy_cb, 0, NULL);
	return 0;
}
103 : :
/* CUnit suite teardown: unregister the fake accel io_device. */
static int
ut_bdev_teardown(void)
{
	spdk_io_device_unregister(&g_accel_io_device, NULL);

	return 0;
}
111 : :
/* bdev destruct callback: nothing allocated per-bdev, report synchronous done. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
117 : :
/* Description of one I/O the test expects the stub backend to receive.
 * Queued on bdev_ut_channel.expected_io and checked in stub_submit_request(). */
struct ut_expected_io {
	uint8_t type;		/* SPDK_BDEV_IO_TYPE_*; INVALID means "don't check type" */
	uint64_t offset;	/* expected offset_blocks */
	uint64_t src_offset;	/* expected copy source offset (COPY only) */
	uint64_t length;	/* expected num_blocks; 0 means "don't check further" */
	int iovcnt;		/* expected iov count; 0 for iov-less I/O types */
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;		/* expected metadata buffer, if non-NULL */
	TAILQ_ENTRY(ut_expected_io) link;
};

/* Per-I/O driver context; links submitted I/O on the outstanding list. */
struct bdev_ut_io {
	TAILQ_ENTRY(bdev_ut_io) link;
};

/* Per-channel state of the stub backend. */
struct bdev_ut_channel {
	TAILQ_HEAD(, bdev_ut_io) outstanding_io;
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;
};
138 : :
/* Mutable state shared between the stub backend and the test cases. */
static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;			/* last I/O submitted to the stub */
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;			/* io_device key for stub channels */
static struct bdev_ut_channel *g_bdev_ut_channel;	/* the single live stub channel */
static void *g_compare_read_buf;			/* data served on READ / checked on COMPARE */
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;			/* sink for WRITE payloads */
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;				/* metadata for READ/COMPARE */
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;				/* buffers handed out for ZCOPY */
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
static uint64_t g_seek_data_offset;			/* offsets reported by SEEK_DATA/HOLE */
static uint64_t g_seek_hole_offset;
static uint64_t g_seek_offset;
160 : :
161 : : static struct ut_expected_io *
162 : 1285 : ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
163 : : {
164 : : struct ut_expected_io *expected_io;
165 : :
166 : 1285 : expected_io = calloc(1, sizeof(*expected_io));
167 [ - + ]: 1285 : SPDK_CU_ASSERT_FATAL(expected_io != NULL);
168 : :
169 : 1285 : expected_io->type = type;
170 : 1285 : expected_io->offset = offset;
171 : 1285 : expected_io->length = length;
172 : 1285 : expected_io->iovcnt = iovcnt;
173 : :
174 : 1285 : return expected_io;
175 : : }
176 : :
177 : : static struct ut_expected_io *
178 : 105 : ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length)
179 : : {
180 : : struct ut_expected_io *expected_io;
181 : :
182 : 105 : expected_io = calloc(1, sizeof(*expected_io));
183 [ - + ]: 105 : SPDK_CU_ASSERT_FATAL(expected_io != NULL);
184 : :
185 : 105 : expected_io->type = type;
186 : 105 : expected_io->offset = offset;
187 : 105 : expected_io->src_offset = src_offset;
188 : 105 : expected_io->length = length;
189 : :
190 : 105 : return expected_io;
191 : : }
192 : :
193 : : static void
194 : 2730 : ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
195 : : {
196 : 2730 : expected_io->iov[pos].iov_base = base;
197 : 2730 : expected_io->iov[pos].iov_len = len;
198 : 2730 : }
199 : :
200 : : static void
201 : 1995 : stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
202 : : {
203 : 1995 : struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
204 : : struct ut_expected_io *expected_io;
205 : : struct iovec *iov, *expected_iov;
206 : : struct spdk_bdev_io *bio_to_abort;
207 : : struct bdev_ut_io *bio;
208 : : int i;
209 : :
210 : 1995 : g_bdev_io = bdev_io;
211 : :
212 [ + + + + ]: 1995 : if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
213 : 55 : uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
214 : :
215 : 55 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
216 : 55 : CU_ASSERT(g_compare_read_buf_len == len);
217 [ - + - + ]: 55 : memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
218 [ + + + + : 55 : if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
+ - ]
219 [ - + - + ]: 15 : memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
220 : 15 : bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
221 : : }
222 : : }
223 : :
224 [ + + + + ]: 1995 : if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
225 : 5 : uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
226 : :
227 : 5 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
228 : 5 : CU_ASSERT(g_compare_write_buf_len == len);
229 [ - + - + ]: 5 : memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
230 : : }
231 : :
232 [ + + + + ]: 1995 : if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
233 : 45 : uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
234 : :
235 : 45 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
236 : 45 : CU_ASSERT(g_compare_read_buf_len == len);
237 [ + + - + : 45 : if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
+ + ]
238 : 20 : g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
239 : : }
240 [ + + ]: 45 : if (bdev_io->u.bdev.md_buf &&
241 [ - + - + : 15 : memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
+ + ]
242 [ + + ]: 15 : bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
243 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
244 : : }
245 : : }
246 : :
247 [ + + ]: 1995 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
248 [ + + ]: 40 : if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
249 [ + - ]: 35 : TAILQ_FOREACH(bio, &ch->outstanding_io, link) {
250 : 35 : bio_to_abort = spdk_bdev_io_from_ctx(bio);
251 [ + - ]: 35 : if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
252 [ + + ]: 35 : TAILQ_REMOVE(&ch->outstanding_io, bio, link);
253 : 35 : ch->outstanding_io_count--;
254 : 35 : spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
255 : 35 : break;
256 : : }
257 : : }
258 : : }
259 : : }
260 : :
261 [ + + ]: 1995 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
262 [ + + ]: 20 : if (bdev_io->u.bdev.zcopy.start) {
263 : 10 : g_zcopy_bdev_io = bdev_io;
264 [ + + ]: 10 : if (bdev_io->u.bdev.zcopy.populate) {
265 : : /* Start of a read */
266 : 5 : CU_ASSERT(g_zcopy_read_buf != NULL);
267 : 5 : CU_ASSERT(g_zcopy_read_buf_len > 0);
268 : 5 : bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
269 : 5 : bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
270 : 5 : bdev_io->u.bdev.iovcnt = 1;
271 : : } else {
272 : : /* Start of a write */
273 : 5 : CU_ASSERT(g_zcopy_write_buf != NULL);
274 : 5 : CU_ASSERT(g_zcopy_write_buf_len > 0);
275 : 5 : bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
276 : 5 : bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
277 : 5 : bdev_io->u.bdev.iovcnt = 1;
278 : : }
279 : : } else {
280 [ + + ]: 10 : if (bdev_io->u.bdev.zcopy.commit) {
281 : : /* End of write */
282 : 5 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
283 : 5 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
284 : 5 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
285 : 5 : g_zcopy_write_buf = NULL;
286 : 5 : g_zcopy_write_buf_len = 0;
287 : : } else {
288 : : /* End of read */
289 : 5 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
290 : 5 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
291 : 5 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
292 : 5 : g_zcopy_read_buf = NULL;
293 : 5 : g_zcopy_read_buf_len = 0;
294 : : }
295 : : }
296 : : }
297 : :
298 [ + + ]: 1995 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
299 : 5 : bdev_io->u.bdev.seek.offset = g_seek_data_offset;
300 : : }
301 : :
302 [ + + ]: 1995 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
303 : 5 : bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
304 : : }
305 : :
306 : 1995 : TAILQ_INSERT_TAIL(&ch->outstanding_io, (struct bdev_ut_io *)bdev_io->driver_ctx, link);
307 : 1995 : ch->outstanding_io_count++;
308 : :
309 : 1995 : expected_io = TAILQ_FIRST(&ch->expected_io);
310 [ + + ]: 1995 : if (expected_io == NULL) {
311 : 605 : return;
312 : : }
313 [ + + ]: 1390 : TAILQ_REMOVE(&ch->expected_io, expected_io, link);
314 : :
315 [ + - ]: 1390 : if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
316 : 1390 : CU_ASSERT(bdev_io->type == expected_io->type);
317 : : }
318 : :
319 [ + + ]: 1390 : if (expected_io->md_buf != NULL) {
320 : 140 : CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
321 : : }
322 : :
323 [ - + ]: 1390 : if (expected_io->length == 0) {
324 : 0 : free(expected_io);
325 : 0 : return;
326 : : }
327 : :
328 : 1390 : CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
329 : 1390 : CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
330 [ + + ]: 1390 : if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) {
331 : 105 : CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks);
332 : : }
333 : :
334 [ + + ]: 1390 : if (expected_io->iovcnt == 0) {
335 : 505 : free(expected_io);
336 : : /* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */
337 : 505 : return;
338 : : }
339 : :
340 : 885 : CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
341 [ + + ]: 3615 : for (i = 0; i < expected_io->iovcnt; i++) {
342 : 2730 : expected_iov = &expected_io->iov[i];
343 [ + + ]: 2730 : if (bdev_io->internal.orig_iovcnt == 0) {
344 : 2710 : iov = &bdev_io->u.bdev.iovs[i];
345 : : } else {
346 : 20 : iov = bdev_io->internal.orig_iovs;
347 : : }
348 : 2730 : CU_ASSERT(iov->iov_len == expected_iov->iov_len);
349 : 2730 : CU_ASSERT(iov->iov_base == expected_iov->iov_base);
350 : : }
351 : :
352 : 885 : free(expected_io);
353 : : }
354 : :
/* get_buf callback: the buffer must have been obtained, then forward the I/O
 * to the normal stub submit path. */
static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}
363 : :
/* Alternate submit path that first requests a data buffer sized for the
 * whole I/O, exercising the spdk_bdev_io_get_buf() machinery. */
static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
370 : :
371 : : static uint32_t
372 : 860 : stub_complete_io(uint32_t num_to_complete)
373 : : {
374 : 860 : struct bdev_ut_channel *ch = g_bdev_ut_channel;
375 : : struct bdev_ut_io *bio;
376 : : struct spdk_bdev_io *bdev_io;
377 : : static enum spdk_bdev_io_status io_status;
378 : 860 : uint32_t num_completed = 0;
379 : :
380 [ + + ]: 2815 : while (num_completed < num_to_complete) {
381 [ + + ]: 1970 : if (TAILQ_EMPTY(&ch->outstanding_io)) {
382 : 15 : break;
383 : : }
384 : 1955 : bio = TAILQ_FIRST(&ch->outstanding_io);
385 [ + + ]: 1955 : TAILQ_REMOVE(&ch->outstanding_io, bio, link);
386 : 1955 : bdev_io = spdk_bdev_io_from_ctx(bio);
387 : 1955 : ch->outstanding_io_count--;
388 : 1955 : io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
389 : : g_io_exp_status;
390 : 1955 : spdk_bdev_io_complete(bdev_io, io_status);
391 : 1955 : num_completed++;
392 : : }
393 : :
394 : 860 : return num_completed;
395 : : }
396 : :
397 : : static struct spdk_io_channel *
398 : 175 : bdev_ut_get_io_channel(void *ctx)
399 : : {
400 : 175 : return spdk_get_io_channel(&g_bdev_ut_io_device);
401 : : }
402 : :
403 : : static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
404 : : [SPDK_BDEV_IO_TYPE_READ] = true,
405 : : [SPDK_BDEV_IO_TYPE_WRITE] = true,
406 : : [SPDK_BDEV_IO_TYPE_COMPARE] = true,
407 : : [SPDK_BDEV_IO_TYPE_UNMAP] = true,
408 : : [SPDK_BDEV_IO_TYPE_FLUSH] = true,
409 : : [SPDK_BDEV_IO_TYPE_RESET] = true,
410 : : [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
411 : : [SPDK_BDEV_IO_TYPE_NVME_IO] = true,
412 : : [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
413 : : [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
414 : : [SPDK_BDEV_IO_TYPE_ZCOPY] = true,
415 : : [SPDK_BDEV_IO_TYPE_ABORT] = true,
416 : : [SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true,
417 : : [SPDK_BDEV_IO_TYPE_SEEK_DATA] = true,
418 : : [SPDK_BDEV_IO_TYPE_COPY] = true,
419 : : };
420 : :
/* Enable or disable support for one I/O type in the stub backend. */
static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}
426 : :
/* io_type_supported callback: report the flag set by ut_enable_io_type(). */
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}
432 : :
/* Function table wired into every bdev allocated by this test file. */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
439 : :
440 : : static int
441 : 175 : bdev_ut_create_ch(void *io_device, void *ctx_buf)
442 : : {
443 : 175 : struct bdev_ut_channel *ch = ctx_buf;
444 : :
445 : 175 : CU_ASSERT(g_bdev_ut_channel == NULL);
446 : 175 : g_bdev_ut_channel = ch;
447 : :
448 : 175 : TAILQ_INIT(&ch->outstanding_io);
449 : 175 : ch->outstanding_io_count = 0;
450 : 175 : TAILQ_INIT(&ch->expected_io);
451 : 175 : return 0;
452 : : }
453 : :
/* Channel-destroy callback: clear the global pointer published at create. */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
460 : :
struct spdk_bdev_module bdev_ut_if;

/* Module init: register the stub io_device and (because async_init is set)
 * signal init completion explicitly. */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

/* Module fini: tear down the stub io_device. */
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

/* Base-bdev module under which test bdevs are registered. */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};
484 : :
static void vbdev_ut_examine_config(struct spdk_bdev *bdev);
static void vbdev_ut_examine_disk(struct spdk_bdev *bdev);

/* Virtual-bdev module init: nothing to set up. */
static int
vbdev_ut_module_init(void)
{
	return 0;
}

/* Virtual-bdev module fini: nothing to tear down. */
static void
vbdev_ut_module_fini(void)
{
}

/* Per-I/O context size requested from the bdev layer (driver_ctx). */
static int
vbdev_ut_get_ctx_size(void)
{
	return sizeof(struct bdev_ut_io);
}

/* Virtual-bdev module used to exercise claim and examine paths. */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine_config,
	.examine_disk = vbdev_ut_examine_disk,
	.get_ctx_size = vbdev_ut_get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
516 : :
/* Optional bdev->ctxt payload: lets a test hook and count examine callbacks. */
struct ut_examine_ctx {
	void (*examine_config)(struct spdk_bdev *bdev);
	void (*examine_disk)(struct spdk_bdev *bdev);
	uint32_t examine_config_count;
	uint32_t examine_disk_count;
};
523 : :
524 : : static void
525 : 400 : vbdev_ut_examine_config(struct spdk_bdev *bdev)
526 : : {
527 : 400 : struct ut_examine_ctx *ctx = bdev->ctxt;
528 : :
529 [ + + ]: 400 : if (ctx != NULL) {
530 : 15 : ctx->examine_config_count++;
531 [ + - ]: 15 : if (ctx->examine_config != NULL) {
532 : 15 : ctx->examine_config(bdev);
533 : : }
534 : : }
535 : :
536 : 400 : spdk_bdev_module_examine_done(&vbdev_ut_if);
537 : 400 : }
538 : :
539 : : static void
540 : 385 : vbdev_ut_examine_disk(struct spdk_bdev *bdev)
541 : : {
542 : 385 : struct ut_examine_ctx *ctx = bdev->ctxt;
543 : :
544 [ + + ]: 385 : if (ctx != NULL) {
545 : 15 : ctx->examine_disk_count++;
546 [ + - ]: 15 : if (ctx->examine_disk != NULL) {
547 : 15 : ctx->examine_disk(bdev);
548 : : }
549 : : }
550 : :
551 : 385 : spdk_bdev_module_examine_done(&vbdev_ut_if);
552 : 385 : }
553 : :
/* spdk_bdev_initialize() completion callback: init must succeed. */
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

/* spdk_bdev_finish()/spdk_iobuf_finish() completion callback: no-op. */
static void
bdev_fini_cb(void *arg)
{
}
564 : :
/* Bring up the bdev layer for one test: optionally apply custom opts, init
 * the iobuf pools, then run bdev init to completion on the UT threads. */
static void
ut_init_bdev(struct spdk_bdev_opts *opts)
{
	int rc;

	if (opts != NULL) {
		rc = spdk_bdev_set_opts(opts);
		CU_ASSERT(rc == 0);
	}
	rc = spdk_iobuf_initialize();
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();
}
579 : :
/* Tear down the bdev layer and iobuf pools, draining the UT threads. */
static void
ut_fini_bdev(void)
{
	spdk_bdev_finish(bdev_fini_cb, NULL);
	spdk_iobuf_finish(bdev_fini_cb, NULL);
	poll_threads();
}
587 : :
588 : : static struct spdk_bdev *
589 : 360 : allocate_bdev_ctx(char *name, void *ctx)
590 : : {
591 : : struct spdk_bdev *bdev;
592 : : int rc;
593 : :
594 : 360 : bdev = calloc(1, sizeof(*bdev));
595 [ - + ]: 360 : SPDK_CU_ASSERT_FATAL(bdev != NULL);
596 : :
597 : 360 : bdev->ctxt = ctx;
598 : 360 : bdev->name = name;
599 : 360 : bdev->fn_table = &fn_table;
600 : 360 : bdev->module = &bdev_ut_if;
601 : 360 : bdev->blockcnt = 1024;
602 : 360 : bdev->blocklen = 512;
603 : :
604 : 360 : spdk_uuid_generate(&bdev->uuid);
605 : :
606 : 360 : rc = spdk_bdev_register(bdev);
607 : 360 : poll_threads();
608 : 360 : CU_ASSERT(rc == 0);
609 : :
610 : 360 : return bdev;
611 : : }
612 : :
/* Convenience wrapper: allocate a test bdev without a ut_examine_ctx. */
static struct spdk_bdev *
allocate_bdev(char *name)
{
	return allocate_bdev_ctx(name, NULL);
}
618 : :
619 : : static struct spdk_bdev *
620 : 25 : allocate_vbdev(char *name)
621 : : {
622 : : struct spdk_bdev *bdev;
623 : : int rc;
624 : :
625 : 25 : bdev = calloc(1, sizeof(*bdev));
626 [ - + ]: 25 : SPDK_CU_ASSERT_FATAL(bdev != NULL);
627 : :
628 : 25 : bdev->name = name;
629 : 25 : bdev->fn_table = &fn_table;
630 : 25 : bdev->module = &vbdev_ut_if;
631 : 25 : bdev->blockcnt = 1024;
632 : 25 : bdev->blocklen = 512;
633 : :
634 : 25 : rc = spdk_bdev_register(bdev);
635 : 25 : poll_threads();
636 : 25 : CU_ASSERT(rc == 0);
637 : :
638 : 25 : return bdev;
639 : : }
640 : :
/* Unregister and free a test bdev; poison the memory to catch use-after-free. */
static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

/* Same as free_bdev(), for bdevs created with allocate_vbdev(). */
static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
658 : :
/* Completion callback for spdk_bdev_get_device_stat(): verify the bdev,
 * free the stat buffer allocated by the test, and signal done via cb_arg. */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}
673 : :
/* Unregister completion callback: capture cb_arg and rc for inspection. */
static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

/* Minimal event callback for opens that ignore events. */
static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

/* Event callback #1: record the event; on REMOVE, close the descriptor
 * passed (by address) through event_ctx. */
static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

/* Event callback #2: same as #1 but records into g_event_type2. */
static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

/* Event callback #3: record only; does not close on REMOVE. */
static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

/* Event callback #4: record only; does not close on REMOVE. */
static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

/* Seek completion: stash the reported offset and release the I/O. */
static void
bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
	spdk_bdev_free_io(bdev_io);
}
726 : :
727 : : static void
728 : 5 : get_device_stat_test(void)
729 : : {
730 : : struct spdk_bdev *bdev;
731 : : struct spdk_bdev_io_stat *stat;
732 : 4 : bool done;
733 : :
734 : 5 : bdev = allocate_bdev("bdev0");
735 : 5 : stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
736 [ - + ]: 5 : if (stat == NULL) {
737 : 0 : free_bdev(bdev);
738 : 0 : return;
739 : : }
740 : :
741 : 5 : done = false;
742 : 5 : spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
743 [ + + + + ]: 10 : while (!done) { poll_threads(); }
744 : :
745 : 5 : free_bdev(bdev);
746 : : }
747 : :
748 : : static void
749 : 5 : open_write_test(void)
750 : : {
751 : : struct spdk_bdev *bdev[9];
752 : 5 : struct spdk_bdev_desc *desc[9] = {};
753 : : int rc;
754 : :
755 : 5 : ut_init_bdev(NULL);
756 : :
757 : : /*
758 : : * Create a tree of bdevs to test various open w/ write cases.
759 : : *
760 : : * bdev0 through bdev3 are physical block devices, such as NVMe
761 : : * namespaces or Ceph block devices.
762 : : *
763 : : * bdev4 is a virtual bdev with multiple base bdevs. This models
764 : : * caching or RAID use cases.
765 : : *
766 : : * bdev5 through bdev7 are all virtual bdevs with the same base
767 : : * bdev (except bdev7). This models partitioning or logical volume
768 : : * use cases.
769 : : *
770 : : * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
771 : : * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
772 : : * models caching, RAID, partitioning or logical volumes use cases.
773 : : *
774 : : * bdev8 is a virtual bdev with multiple base bdevs, but these
775 : : * base bdevs are themselves virtual bdevs.
776 : : *
777 : : * bdev8
778 : : * |
779 : : * +----------+
780 : : * | |
781 : : * bdev4 bdev5 bdev6 bdev7
782 : : * | | | |
783 : : * +---+---+ +---+ + +---+---+
784 : : * | | \ | / \
785 : : * bdev0 bdev1 bdev2 bdev3
786 : : */
787 : :
788 : 5 : bdev[0] = allocate_bdev("bdev0");
789 : 5 : rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
790 : 5 : CU_ASSERT(rc == 0);
791 : :
792 : 5 : bdev[1] = allocate_bdev("bdev1");
793 : 5 : rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
794 : 5 : CU_ASSERT(rc == 0);
795 : :
796 : 5 : bdev[2] = allocate_bdev("bdev2");
797 : 5 : rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
798 : 5 : CU_ASSERT(rc == 0);
799 : :
800 : 5 : bdev[3] = allocate_bdev("bdev3");
801 : 5 : rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
802 : 5 : CU_ASSERT(rc == 0);
803 : :
804 : 5 : bdev[4] = allocate_vbdev("bdev4");
805 : 5 : rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
806 : 5 : CU_ASSERT(rc == 0);
807 : :
808 : 5 : bdev[5] = allocate_vbdev("bdev5");
809 : 5 : rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
810 : 5 : CU_ASSERT(rc == 0);
811 : :
812 : 5 : bdev[6] = allocate_vbdev("bdev6");
813 : :
814 : 5 : bdev[7] = allocate_vbdev("bdev7");
815 : :
816 : 5 : bdev[8] = allocate_vbdev("bdev8");
817 : :
818 : : /* Open bdev0 read-only. This should succeed. */
819 : 5 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
820 : 5 : CU_ASSERT(rc == 0);
821 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
822 : 5 : CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
823 : 5 : spdk_bdev_close(desc[0]);
824 : :
825 : : /*
826 : : * Open bdev1 read/write. This should fail since bdev1 has been claimed
827 : : * by a vbdev module.
828 : : */
829 : 5 : rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
830 : 5 : CU_ASSERT(rc == -EPERM);
831 : :
832 : : /*
833 : : * Open bdev4 read/write. This should fail since bdev3 has been claimed
834 : : * by a vbdev module.
835 : : */
836 : 5 : rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
837 : 5 : CU_ASSERT(rc == -EPERM);
838 : :
839 : : /* Open bdev4 read-only. This should succeed. */
840 : 5 : rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
841 : 5 : CU_ASSERT(rc == 0);
842 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
843 : 5 : CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
844 : 5 : spdk_bdev_close(desc[4]);
845 : :
846 : : /*
847 : : * Open bdev8 read/write. This should succeed since it is a leaf
848 : : * bdev.
849 : : */
850 : 5 : rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
851 : 5 : CU_ASSERT(rc == 0);
852 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
853 : 5 : CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
854 : 5 : spdk_bdev_close(desc[8]);
855 : :
856 : : /*
857 : : * Open bdev5 read/write. This should fail since bdev4 has been claimed
858 : : * by a vbdev module.
859 : : */
860 : 5 : rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
861 : 5 : CU_ASSERT(rc == -EPERM);
862 : :
863 : : /* Open bdev4 read-only. This should succeed. */
864 : 5 : rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
865 : 5 : CU_ASSERT(rc == 0);
866 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
867 : 5 : CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
868 : 5 : spdk_bdev_close(desc[5]);
869 : :
870 : 5 : free_vbdev(bdev[8]);
871 : :
872 : 5 : free_vbdev(bdev[5]);
873 : 5 : free_vbdev(bdev[6]);
874 : 5 : free_vbdev(bdev[7]);
875 : :
876 : 5 : free_vbdev(bdev[4]);
877 : :
878 : 5 : free_bdev(bdev[0]);
879 : 5 : free_bdev(bdev[1]);
880 : 5 : free_bdev(bdev[2]);
881 : 5 : free_bdev(bdev[3]);
882 : :
883 : 5 : ut_fini_bdev();
884 : 5 : }
885 : :
886 : : static void
887 : 5 : claim_test(void)
888 : : {
889 : : struct spdk_bdev *bdev;
890 : 4 : struct spdk_bdev_desc *desc, *open_desc;
891 : : int rc;
892 : : uint32_t count;
893 : :
894 : 5 : ut_init_bdev(NULL);
895 : :
896 : : /*
897 : : * A vbdev that uses a read-only bdev may need it to remain read-only.
898 : : * To do so, it opens the bdev read-only, then claims it without
899 : : * passing a spdk_bdev_desc.
900 : : */
901 : 5 : bdev = allocate_bdev("bdev0");
902 : 5 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
903 : 5 : CU_ASSERT(rc == 0);
904 [ - + ]: 5 : CU_ASSERT(desc->write == false);
905 : :
906 : 5 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
907 : 5 : CU_ASSERT(rc == 0);
908 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
909 : 5 : CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);
910 : :
911 : : /* There should be only one open descriptor and it should still be ro */
912 : 5 : count = 0;
913 [ + + ]: 10 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
914 : 5 : CU_ASSERT(open_desc == desc);
915 [ - + ]: 5 : CU_ASSERT(!open_desc->write);
916 : 5 : count++;
917 : : }
918 : 5 : CU_ASSERT(count == 1);
919 : :
920 : : /* A read-only bdev is upgraded to read-write if desc is passed. */
921 : 5 : spdk_bdev_module_release_bdev(bdev);
922 : 5 : rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
923 : 5 : CU_ASSERT(rc == 0);
924 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
925 : 5 : CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);
926 : :
927 : : /* There should be only one open descriptor and it should be rw */
928 : 5 : count = 0;
929 [ + + ]: 10 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
930 : 5 : CU_ASSERT(open_desc == desc);
931 [ - + ]: 5 : CU_ASSERT(open_desc->write);
932 : 5 : count++;
933 : : }
934 : 5 : CU_ASSERT(count == 1);
935 : :
936 : 5 : spdk_bdev_close(desc);
937 : 5 : free_bdev(bdev);
938 : 5 : ut_fini_bdev();
939 : 5 : }
940 : :
941 : : static void
942 : 5 : bytes_to_blocks_test(void)
943 : : {
944 : 4 : struct spdk_bdev bdev;
945 : 4 : uint64_t offset_blocks, num_blocks;
946 : :
947 [ - + ]: 5 : memset(&bdev, 0, sizeof(bdev));
948 : :
949 : 5 : bdev.blocklen = 512;
950 : :
951 : : /* All parameters valid */
952 : 5 : offset_blocks = 0;
953 : 5 : num_blocks = 0;
954 : 5 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
955 : 5 : CU_ASSERT(offset_blocks == 1);
956 : 5 : CU_ASSERT(num_blocks == 2);
957 : :
958 : : /* Offset not a block multiple */
959 : 5 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);
960 : :
961 : : /* Length not a block multiple */
962 : 5 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);
963 : :
964 : : /* In case blocklen not the power of two */
965 : 5 : bdev.blocklen = 100;
966 : 5 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
967 : 5 : CU_ASSERT(offset_blocks == 1);
968 : 5 : CU_ASSERT(num_blocks == 2);
969 : :
970 : : /* Offset not a block multiple */
971 : 5 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);
972 : :
973 : : /* Length not a block multiple */
974 : 5 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
975 : 5 : }
976 : :
977 : : static void
978 : 5 : num_blocks_test(void)
979 : : {
980 : : struct spdk_bdev *bdev;
981 : 5 : struct spdk_bdev_desc *desc = NULL;
982 : : int rc;
983 : :
984 : 5 : ut_init_bdev(NULL);
985 : 5 : bdev = allocate_bdev("num_blocks");
986 : :
987 : 5 : spdk_bdev_notify_blockcnt_change(bdev, 50);
988 : :
989 : : /* Growing block number */
990 : 5 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 70) == 0);
991 : : /* Shrinking block number */
992 : 5 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 30) == 0);
993 : :
994 : 5 : rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
995 : 5 : CU_ASSERT(rc == 0);
996 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
997 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
998 : :
999 : : /* Growing block number */
1000 : 5 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 80) == 0);
1001 : : /* Shrinking block number */
1002 : 5 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 20) != 0);
1003 : :
1004 : 5 : g_event_type1 = 0xFF;
1005 : : /* Growing block number */
1006 : 5 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 90) == 0);
1007 : :
1008 : 5 : poll_threads();
1009 : 5 : CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);
1010 : :
1011 : 5 : g_event_type1 = 0xFF;
1012 : : /* Growing block number and closing */
1013 : 5 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 100) == 0);
1014 : :
1015 : 5 : spdk_bdev_close(desc);
1016 : 5 : free_bdev(bdev);
1017 : 5 : ut_fini_bdev();
1018 : :
1019 : 5 : poll_threads();
1020 : :
1021 : : /* Callback is not called for closed device */
1022 : 5 : CU_ASSERT_EQUAL(g_event_type1, 0xFF);
1023 : 5 : }
1024 : :
1025 : : static void
1026 : 5 : io_valid_test(void)
1027 : : {
1028 : 4 : struct spdk_bdev bdev;
1029 : :
1030 [ - + ]: 5 : memset(&bdev, 0, sizeof(bdev));
1031 : :
1032 : 5 : bdev.blocklen = 512;
1033 : 5 : spdk_spin_init(&bdev.internal.spinlock);
1034 : :
1035 : 5 : spdk_bdev_notify_blockcnt_change(&bdev, 100);
1036 : :
1037 : : /* All parameters valid */
1038 : 5 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);
1039 : :
1040 : : /* Last valid block */
1041 : 5 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);
1042 : :
1043 : : /* Offset past end of bdev */
1044 : 5 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);
1045 : :
1046 : : /* Offset + length past end of bdev */
1047 : 5 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);
1048 : :
1049 : : /* Offset near end of uint64_t range (2^64 - 1) */
1050 : 5 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
1051 : :
1052 : 5 : spdk_spin_destroy(&bdev.internal.spinlock);
1053 : 5 : }
1054 : :
1055 : : static void
1056 : 5 : alias_add_del_test(void)
1057 : : {
1058 : : struct spdk_bdev *bdev[3];
1059 : : int rc;
1060 : :
1061 : 5 : ut_init_bdev(NULL);
1062 : :
1063 : : /* Creating and registering bdevs */
1064 : 5 : bdev[0] = allocate_bdev("bdev0");
1065 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(bdev[0] != 0);
1066 : :
1067 : 5 : bdev[1] = allocate_bdev("bdev1");
1068 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(bdev[1] != 0);
1069 : :
1070 : 5 : bdev[2] = allocate_bdev("bdev2");
1071 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(bdev[2] != 0);
1072 : :
1073 : 5 : poll_threads();
1074 : :
1075 : : /*
1076 : : * Trying adding an alias identical to name.
1077 : : * Alias is identical to name, so it can not be added to aliases list
1078 : : */
1079 : 5 : rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
1080 : 5 : CU_ASSERT(rc == -EEXIST);
1081 : :
1082 : : /*
1083 : : * Trying to add empty alias,
1084 : : * this one should fail
1085 : : */
1086 : 5 : rc = spdk_bdev_alias_add(bdev[0], NULL);
1087 : 5 : CU_ASSERT(rc == -EINVAL);
1088 : :
1089 : : /* Trying adding same alias to two different registered bdevs */
1090 : :
1091 : : /* Alias is used first time, so this one should pass */
1092 : 5 : rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
1093 : 5 : CU_ASSERT(rc == 0);
1094 : :
1095 : : /* Alias was added to another bdev, so this one should fail */
1096 : 5 : rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
1097 : 5 : CU_ASSERT(rc == -EEXIST);
1098 : :
1099 : : /* Alias is used first time, so this one should pass */
1100 : 5 : rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
1101 : 5 : CU_ASSERT(rc == 0);
1102 : :
1103 : : /* Trying removing an alias from registered bdevs */
1104 : :
1105 : : /* Alias is not on a bdev aliases list, so this one should fail */
1106 : 5 : rc = spdk_bdev_alias_del(bdev[0], "not existing");
1107 : 5 : CU_ASSERT(rc == -ENOENT);
1108 : :
1109 : : /* Alias is present on a bdev aliases list, so this one should pass */
1110 : 5 : rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
1111 : 5 : CU_ASSERT(rc == 0);
1112 : :
1113 : : /* Alias is present on a bdev aliases list, so this one should pass */
1114 : 5 : rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
1115 : 5 : CU_ASSERT(rc == 0);
1116 : :
1117 : : /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
1118 : 5 : rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
1119 : 5 : CU_ASSERT(rc != 0);
1120 : :
1121 : : /* Trying to del all alias from empty alias list */
1122 : 5 : spdk_bdev_alias_del_all(bdev[2]);
1123 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));
1124 : :
1125 : : /* Trying to del all alias from non-empty alias list */
1126 : 5 : rc = spdk_bdev_alias_add(bdev[2], "alias0");
1127 : 5 : CU_ASSERT(rc == 0);
1128 : 5 : rc = spdk_bdev_alias_add(bdev[2], "alias1");
1129 : 5 : CU_ASSERT(rc == 0);
1130 : 5 : spdk_bdev_alias_del_all(bdev[2]);
1131 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));
1132 : :
1133 : : /* Unregister and free bdevs */
1134 : 5 : spdk_bdev_unregister(bdev[0], NULL, NULL);
1135 : 5 : spdk_bdev_unregister(bdev[1], NULL, NULL);
1136 : 5 : spdk_bdev_unregister(bdev[2], NULL, NULL);
1137 : :
1138 : 5 : poll_threads();
1139 : :
1140 : 5 : free(bdev[0]);
1141 : 5 : free(bdev[1]);
1142 : 5 : free(bdev[2]);
1143 : :
1144 : 5 : ut_fini_bdev();
1145 : 5 : }
1146 : :
1147 : : static void
1148 : 740 : io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1149 : : {
1150 : 740 : g_io_done = true;
1151 : 740 : g_io_status = bdev_io->internal.status;
1152 [ + + + + ]: 740 : if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
1153 : : (bdev_io->u.bdev.zcopy.start)) {
1154 : 10 : g_zcopy_bdev_io = bdev_io;
1155 : : } else {
1156 : 730 : spdk_bdev_free_io(bdev_io);
1157 : 730 : g_zcopy_bdev_io = NULL;
1158 : : }
1159 : 740 : }
1160 : :
1161 : : struct bdev_ut_io_wait_entry {
1162 : : struct spdk_bdev_io_wait_entry entry;
1163 : : struct spdk_io_channel *io_ch;
1164 : : struct spdk_bdev_desc *desc;
1165 : : bool submitted;
1166 : : };
1167 : :
1168 : : static void
1169 : 10 : io_wait_cb(void *arg)
1170 : : {
1171 : 10 : struct bdev_ut_io_wait_entry *entry = arg;
1172 : : int rc;
1173 : :
1174 : 10 : rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
1175 : 10 : CU_ASSERT(rc == 0);
1176 : 10 : entry->submitted = true;
1177 : 10 : }
1178 : :
1179 : : static void
1180 : 5 : bdev_io_types_test(void)
1181 : : {
1182 : : struct spdk_bdev *bdev;
1183 : 5 : struct spdk_bdev_desc *desc = NULL;
1184 : : struct spdk_io_channel *io_ch;
1185 : 5 : struct spdk_bdev_opts bdev_opts = {};
1186 : : int rc;
1187 : :
1188 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1189 : 5 : bdev_opts.bdev_io_pool_size = 4;
1190 : 5 : bdev_opts.bdev_io_cache_size = 2;
1191 : 5 : ut_init_bdev(&bdev_opts);
1192 : :
1193 : 5 : bdev = allocate_bdev("bdev0");
1194 : :
1195 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
1196 : 5 : CU_ASSERT(rc == 0);
1197 : 5 : poll_threads();
1198 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
1199 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
1200 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
1201 : 5 : CU_ASSERT(io_ch != NULL);
1202 : :
1203 : : /* WRITE and WRITE ZEROES are not supported */
1204 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
1205 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
1206 : 5 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
1207 : 5 : CU_ASSERT(rc == -ENOTSUP);
1208 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
1209 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);
1210 : :
1211 : : /* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
1212 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
1213 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
1214 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
1215 : 5 : rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
1216 : 5 : CU_ASSERT(rc == -ENOTSUP);
1217 : 5 : rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
1218 : 5 : CU_ASSERT(rc == -ENOTSUP);
1219 : 5 : rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
1220 : 5 : CU_ASSERT(rc == -ENOTSUP);
1221 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
1222 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
1223 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);
1224 : :
1225 : 5 : spdk_put_io_channel(io_ch);
1226 : 5 : spdk_bdev_close(desc);
1227 : 5 : free_bdev(bdev);
1228 : 5 : ut_fini_bdev();
1229 : 5 : }
1230 : :
1231 : : static void
1232 : 5 : bdev_io_wait_test(void)
1233 : : {
1234 : : struct spdk_bdev *bdev;
1235 : 5 : struct spdk_bdev_desc *desc = NULL;
1236 : : struct spdk_io_channel *io_ch;
1237 : 5 : struct spdk_bdev_opts bdev_opts = {};
1238 : 4 : struct bdev_ut_io_wait_entry io_wait_entry;
1239 : 4 : struct bdev_ut_io_wait_entry io_wait_entry2;
1240 : : int rc;
1241 : :
1242 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1243 : 5 : bdev_opts.bdev_io_pool_size = 4;
1244 : 5 : bdev_opts.bdev_io_cache_size = 2;
1245 : 5 : ut_init_bdev(&bdev_opts);
1246 : :
1247 : 5 : bdev = allocate_bdev("bdev0");
1248 : :
1249 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
1250 : 5 : CU_ASSERT(rc == 0);
1251 : 5 : poll_threads();
1252 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
1253 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
1254 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
1255 : 5 : CU_ASSERT(io_ch != NULL);
1256 : :
1257 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1258 : 5 : CU_ASSERT(rc == 0);
1259 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1260 : 5 : CU_ASSERT(rc == 0);
1261 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1262 : 5 : CU_ASSERT(rc == 0);
1263 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1264 : 5 : CU_ASSERT(rc == 0);
1265 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
1266 : :
1267 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1268 : 5 : CU_ASSERT(rc == -ENOMEM);
1269 : :
1270 : 5 : io_wait_entry.entry.bdev = bdev;
1271 : 5 : io_wait_entry.entry.cb_fn = io_wait_cb;
1272 : 5 : io_wait_entry.entry.cb_arg = &io_wait_entry;
1273 : 5 : io_wait_entry.io_ch = io_ch;
1274 : 5 : io_wait_entry.desc = desc;
1275 : 5 : io_wait_entry.submitted = false;
1276 : : /* Cannot use the same io_wait_entry for two different calls. */
1277 : 5 : memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
1278 : 5 : io_wait_entry2.entry.cb_arg = &io_wait_entry2;
1279 : :
1280 : : /* Queue two I/O waits. */
1281 : 5 : rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
1282 : 5 : CU_ASSERT(rc == 0);
1283 [ - + ]: 5 : CU_ASSERT(io_wait_entry.submitted == false);
1284 : 5 : rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
1285 : 5 : CU_ASSERT(rc == 0);
1286 [ - + ]: 5 : CU_ASSERT(io_wait_entry2.submitted == false);
1287 : :
1288 : 5 : stub_complete_io(1);
1289 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
1290 [ - + ]: 5 : CU_ASSERT(io_wait_entry.submitted == true);
1291 [ - + ]: 5 : CU_ASSERT(io_wait_entry2.submitted == false);
1292 : :
1293 : 5 : stub_complete_io(1);
1294 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
1295 [ - + ]: 5 : CU_ASSERT(io_wait_entry2.submitted == true);
1296 : :
1297 : 5 : stub_complete_io(4);
1298 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1299 : :
1300 : 5 : spdk_put_io_channel(io_ch);
1301 : 5 : spdk_bdev_close(desc);
1302 : 5 : free_bdev(bdev);
1303 : 5 : ut_fini_bdev();
1304 : 5 : }
1305 : :
1306 : : static void
1307 : 5 : bdev_io_spans_split_test(void)
1308 : : {
1309 : 4 : struct spdk_bdev bdev;
1310 : 4 : struct spdk_bdev_io bdev_io;
1311 : 4 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
1312 : :
1313 [ - + ]: 5 : memset(&bdev, 0, sizeof(bdev));
1314 : 5 : bdev_io.u.bdev.iovs = iov;
1315 : :
1316 : 5 : bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
1317 : 5 : bdev.optimal_io_boundary = 0;
1318 : 5 : bdev.max_segment_size = 0;
1319 : 5 : bdev.max_num_segments = 0;
1320 : 5 : bdev_io.bdev = &bdev;
1321 : :
1322 : : /* bdev has no optimal_io_boundary and max_size set - so this should return false. */
1323 : 5 : CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
1324 : :
1325 : 5 : bdev.split_on_optimal_io_boundary = true;
1326 : 5 : bdev.optimal_io_boundary = 32;
1327 : 5 : bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
1328 : :
1329 : : /* RESETs are not based on LBAs - so this should return false. */
1330 : 5 : CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
1331 : :
1332 : 5 : bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
1333 : 5 : bdev_io.u.bdev.offset_blocks = 0;
1334 : 5 : bdev_io.u.bdev.num_blocks = 32;
1335 : :
1336 : : /* This I/O run right up to, but does not cross, the boundary - so this should return false. */
1337 : 5 : CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
1338 : :
1339 : 5 : bdev_io.u.bdev.num_blocks = 33;
1340 : :
1341 : : /* This I/O spans a boundary. */
1342 : 5 : CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
1343 : :
1344 : 5 : bdev_io.u.bdev.num_blocks = 32;
1345 : 5 : bdev.max_segment_size = 512 * 32;
1346 : 5 : bdev.max_num_segments = 1;
1347 : 5 : bdev_io.u.bdev.iovcnt = 1;
1348 : 5 : iov[0].iov_len = 512;
1349 : :
1350 : : /* Does not cross and exceed max_size or max_segs */
1351 : 5 : CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
1352 : :
1353 : 5 : bdev.split_on_optimal_io_boundary = false;
1354 : 5 : bdev.max_segment_size = 512;
1355 : 5 : bdev.max_num_segments = 1;
1356 : 5 : bdev_io.u.bdev.iovcnt = 2;
1357 : :
1358 : : /* Exceed max_segs */
1359 : 5 : CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
1360 : :
1361 : 5 : bdev.max_num_segments = 2;
1362 : 5 : iov[0].iov_len = 513;
1363 : 5 : iov[1].iov_len = 512;
1364 : :
1365 : : /* Exceed max_sizes */
1366 : 5 : CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
1367 : :
1368 : 5 : bdev.max_segment_size = 0;
1369 : 5 : bdev.write_unit_size = 32;
1370 : 5 : bdev.split_on_write_unit = true;
1371 : 5 : bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE;
1372 : :
1373 : : /* This I/O is one write unit */
1374 : 5 : CU_ASSERT(bdev_io_should_split(&bdev_io) == false);
1375 : :
1376 : 5 : bdev_io.u.bdev.num_blocks = 32 * 2;
1377 : :
1378 : : /* This I/O is more than one write unit */
1379 : 5 : CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
1380 : :
1381 : 5 : bdev_io.u.bdev.offset_blocks = 1;
1382 : 5 : bdev_io.u.bdev.num_blocks = 32;
1383 : :
1384 : : /* This I/O is not aligned to write unit size */
1385 : 5 : CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
1386 : 5 : }
1387 : :
1388 : : static void
1389 : 5 : bdev_io_boundary_split_test(void)
1390 : : {
1391 : : struct spdk_bdev *bdev;
1392 : 5 : struct spdk_bdev_desc *desc = NULL;
1393 : : struct spdk_io_channel *io_ch;
1394 : 5 : struct spdk_bdev_opts bdev_opts = {};
1395 : 4 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
1396 : : struct ut_expected_io *expected_io;
1397 : 5 : void *md_buf = (void *)0xFF000000;
1398 : : uint64_t i;
1399 : : int rc;
1400 : :
1401 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1402 : 5 : bdev_opts.bdev_io_pool_size = 512;
1403 : 5 : bdev_opts.bdev_io_cache_size = 64;
1404 : 5 : ut_init_bdev(&bdev_opts);
1405 : :
1406 : 5 : bdev = allocate_bdev("bdev0");
1407 : :
1408 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
1409 : 5 : CU_ASSERT(rc == 0);
1410 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
1411 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
1412 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
1413 : 5 : CU_ASSERT(io_ch != NULL);
1414 : :
1415 : 5 : bdev->optimal_io_boundary = 16;
1416 : 5 : bdev->split_on_optimal_io_boundary = false;
1417 : :
1418 : 5 : g_io_done = false;
1419 : :
1420 : : /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
1421 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
1422 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
1423 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1424 : :
1425 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
1426 : 5 : CU_ASSERT(rc == 0);
1427 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1428 : :
1429 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1430 : 5 : stub_complete_io(1);
1431 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1432 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1433 : :
1434 : 5 : bdev->split_on_optimal_io_boundary = true;
1435 : 5 : bdev->md_interleave = false;
1436 : 5 : bdev->md_len = 8;
1437 : :
1438 : : /* Now test that a single-vector command is split correctly.
1439 : : * Offset 14, length 8, payload 0xF000
1440 : : * Child - Offset 14, length 2, payload 0xF000
1441 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
1442 : : *
1443 : : * Set up the expected values before calling spdk_bdev_read_blocks
1444 : : */
1445 : 5 : g_io_done = false;
1446 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
1447 : 5 : expected_io->md_buf = md_buf;
1448 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
1449 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1450 : :
1451 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
1452 : 5 : expected_io->md_buf = md_buf + 2 * 8;
1453 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
1454 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1455 : :
1456 : : /* spdk_bdev_read_blocks will submit the first child immediately. */
1457 : 5 : rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
1458 : : 14, 8, io_done, NULL);
1459 : 5 : CU_ASSERT(rc == 0);
1460 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1461 : :
1462 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1463 : 5 : stub_complete_io(2);
1464 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1465 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1466 : :
1467 : : /* Now set up a more complex, multi-vector command that needs to be split,
1468 : : * including splitting iovecs.
1469 : : */
1470 : 5 : iov[0].iov_base = (void *)0x10000;
1471 : 5 : iov[0].iov_len = 512;
1472 : 5 : iov[1].iov_base = (void *)0x20000;
1473 : 5 : iov[1].iov_len = 20 * 512;
1474 : 5 : iov[2].iov_base = (void *)0x30000;
1475 : 5 : iov[2].iov_len = 11 * 512;
1476 : :
1477 : 5 : g_io_done = false;
1478 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
1479 : 5 : expected_io->md_buf = md_buf;
1480 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
1481 : 5 : ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
1482 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1483 : :
1484 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
1485 : 5 : expected_io->md_buf = md_buf + 2 * 8;
1486 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
1487 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1488 : :
1489 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
1490 : 5 : expected_io->md_buf = md_buf + 18 * 8;
1491 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
1492 : 5 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
1493 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1494 : :
1495 : 5 : rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
1496 : : 14, 32, io_done, NULL);
1497 : 5 : CU_ASSERT(rc == 0);
1498 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1499 : :
1500 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
1501 : 5 : stub_complete_io(3);
1502 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1503 : :
1504 : : /* Test multi vector command that needs to be split by strip and then needs to be
1505 : : * split further due to the capacity of child iovs.
1506 : : */
1507 [ + + ]: 325 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
1508 : 320 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1509 : 320 : iov[i].iov_len = 512;
1510 : : }
1511 : :
1512 : 5 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1513 : 5 : g_io_done = false;
1514 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
1515 : : SPDK_BDEV_IO_NUM_CHILD_IOV);
1516 : 5 : expected_io->md_buf = md_buf;
1517 [ + + ]: 165 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1518 : 160 : ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
1519 : : }
1520 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1521 : :
1522 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1523 : : SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
1524 : 5 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1525 [ + + ]: 165 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1526 : 160 : ut_expected_io_set_iov(expected_io, i,
1527 : 160 : (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
1528 : : }
1529 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1530 : :
1531 : 5 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
1532 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1533 : 5 : CU_ASSERT(rc == 0);
1534 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1535 : :
1536 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1537 : 5 : stub_complete_io(1);
1538 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1539 : :
1540 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1541 : 5 : stub_complete_io(1);
1542 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1543 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1544 : :
1545 : : /* Test multi vector command that needs to be split by strip and then needs to be
1546 : : * split further due to the capacity of child iovs. In this case, the length of
1547 : : * the rest of iovec array with an I/O boundary is the multiple of block size.
1548 : : */
1549 : :
1550 : : /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
1551 : : * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
1552 : : */
1553 [ + + ]: 155 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1554 : 150 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1555 : 150 : iov[i].iov_len = 512;
1556 : : }
1557 [ + + ]: 15 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1558 : 10 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1559 : 10 : iov[i].iov_len = 256;
1560 : : }
1561 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1562 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
1563 : :
1564 : : /* Add an extra iovec to trigger split */
1565 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1566 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1567 : :
1568 : 5 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1569 : 5 : g_io_done = false;
1570 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1571 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV);
1572 : 5 : expected_io->md_buf = md_buf;
1573 [ + + ]: 155 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1574 : 150 : ut_expected_io_set_iov(expected_io, i,
1575 : 150 : (void *)((i + 1) * 0x10000), 512);
1576 : : }
1577 [ + + ]: 15 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1578 : 10 : ut_expected_io_set_iov(expected_io, i,
1579 : 10 : (void *)((i + 1) * 0x10000), 256);
1580 : : }
1581 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1582 : :
1583 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
1584 : : 1, 1);
1585 : 5 : expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
1586 : 5 : ut_expected_io_set_iov(expected_io, 0,
1587 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
1588 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1589 : :
1590 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1591 : : 1, 1);
1592 : 5 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1593 : 5 : ut_expected_io_set_iov(expected_io, 0,
1594 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1595 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1596 : :
1597 : 5 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
1598 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1599 : 5 : CU_ASSERT(rc == 0);
1600 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1601 : :
1602 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1603 : 5 : stub_complete_io(1);
1604 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1605 : :
1606 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1607 : 5 : stub_complete_io(2);
1608 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1609 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1610 : :
1611 : : /* Test multi vector command that needs to be split by strip and then needs to be
1612 : : * split further due to the capacity of child iovs, the child request offset should
1613 : : * be rewind to last aligned offset and go success without error.
1614 : : */
1615 [ + + ]: 160 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1616 : 155 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1617 : 155 : iov[i].iov_len = 512;
1618 : : }
1619 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1620 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1621 : :
1622 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1623 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1624 : :
1625 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1626 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1627 : :
1628 : 5 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1629 : 5 : g_io_done = false;
1630 : 5 : g_io_status = 0;
1631 : : /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */
1632 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1633 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
1634 : 5 : expected_io->md_buf = md_buf;
1635 [ + + ]: 160 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1636 : 155 : ut_expected_io_set_iov(expected_io, i,
1637 : 155 : (void *)((i + 1) * 0x10000), 512);
1638 : : }
1639 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1640 : : /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */
1641 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
1642 : : 1, 2);
1643 : 5 : expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
1644 : 5 : ut_expected_io_set_iov(expected_io, 0,
1645 : : (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
1646 : 5 : ut_expected_io_set_iov(expected_io, 1,
1647 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
1648 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1649 : : /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */
1650 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1651 : : 1, 1);
1652 : 5 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1653 : 5 : ut_expected_io_set_iov(expected_io, 0,
1654 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1655 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1656 : :
1657 : 5 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
1658 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1659 : 5 : CU_ASSERT(rc == 0);
1660 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1661 : :
1662 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1663 : 5 : stub_complete_io(1);
1664 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1665 : :
1666 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1667 : 5 : stub_complete_io(2);
1668 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1669 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1670 : :
1671 : : /* Test multi vector command that needs to be split due to the IO boundary and
1672 : : * the capacity of child iovs. Especially test the case when the command is
1673 : : * split due to the capacity of child iovs, the tail address is not aligned with
1674 : : * block size and is rewound to the aligned address.
1675 : : *
1676 : : * The iovecs used in read request is complex but is based on the data
1677 : : * collected in the real issue. We change the base addresses but keep the lengths
1678 : : * not to loose the credibility of the test.
1679 : : */
1680 : 5 : bdev->optimal_io_boundary = 128;
1681 : 5 : g_io_done = false;
1682 : 5 : g_io_status = 0;
1683 : :
1684 [ + + ]: 160 : for (i = 0; i < 31; i++) {
1685 : 155 : iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
1686 : 155 : iov[i].iov_len = 1024;
1687 : : }
1688 : 5 : iov[31].iov_base = (void *)0xFEED1F00000;
1689 : 5 : iov[31].iov_len = 32768;
1690 : 5 : iov[32].iov_base = (void *)0xFEED2000000;
1691 : 5 : iov[32].iov_len = 160;
1692 : 5 : iov[33].iov_base = (void *)0xFEED2100000;
1693 : 5 : iov[33].iov_len = 4096;
1694 : 5 : iov[34].iov_base = (void *)0xFEED2200000;
1695 : 5 : iov[34].iov_len = 4096;
1696 : 5 : iov[35].iov_base = (void *)0xFEED2300000;
1697 : 5 : iov[35].iov_len = 4096;
1698 : 5 : iov[36].iov_base = (void *)0xFEED2400000;
1699 : 5 : iov[36].iov_len = 4096;
1700 : 5 : iov[37].iov_base = (void *)0xFEED2500000;
1701 : 5 : iov[37].iov_len = 4096;
1702 : 5 : iov[38].iov_base = (void *)0xFEED2600000;
1703 : 5 : iov[38].iov_len = 4096;
1704 : 5 : iov[39].iov_base = (void *)0xFEED2700000;
1705 : 5 : iov[39].iov_len = 4096;
1706 : 5 : iov[40].iov_base = (void *)0xFEED2800000;
1707 : 5 : iov[40].iov_len = 4096;
1708 : 5 : iov[41].iov_base = (void *)0xFEED2900000;
1709 : 5 : iov[41].iov_len = 4096;
1710 : 5 : iov[42].iov_base = (void *)0xFEED2A00000;
1711 : 5 : iov[42].iov_len = 4096;
1712 : 5 : iov[43].iov_base = (void *)0xFEED2B00000;
1713 : 5 : iov[43].iov_len = 12288;
1714 : 5 : iov[44].iov_base = (void *)0xFEED2C00000;
1715 : 5 : iov[44].iov_len = 8192;
1716 : 5 : iov[45].iov_base = (void *)0xFEED2F00000;
1717 : 5 : iov[45].iov_len = 4096;
1718 : 5 : iov[46].iov_base = (void *)0xFEED3000000;
1719 : 5 : iov[46].iov_len = 4096;
1720 : 5 : iov[47].iov_base = (void *)0xFEED3100000;
1721 : 5 : iov[47].iov_len = 4096;
1722 : 5 : iov[48].iov_base = (void *)0xFEED3200000;
1723 : 5 : iov[48].iov_len = 24576;
1724 : 5 : iov[49].iov_base = (void *)0xFEED3300000;
1725 : 5 : iov[49].iov_len = 16384;
1726 : 5 : iov[50].iov_base = (void *)0xFEED3400000;
1727 : 5 : iov[50].iov_len = 12288;
1728 : 5 : iov[51].iov_base = (void *)0xFEED3500000;
1729 : 5 : iov[51].iov_len = 4096;
1730 : 5 : iov[52].iov_base = (void *)0xFEED3600000;
1731 : 5 : iov[52].iov_len = 4096;
1732 : 5 : iov[53].iov_base = (void *)0xFEED3700000;
1733 : 5 : iov[53].iov_len = 4096;
1734 : 5 : iov[54].iov_base = (void *)0xFEED3800000;
1735 : 5 : iov[54].iov_len = 28672;
1736 : 5 : iov[55].iov_base = (void *)0xFEED3900000;
1737 : 5 : iov[55].iov_len = 20480;
1738 : 5 : iov[56].iov_base = (void *)0xFEED3A00000;
1739 : 5 : iov[56].iov_len = 4096;
1740 : 5 : iov[57].iov_base = (void *)0xFEED3B00000;
1741 : 5 : iov[57].iov_len = 12288;
1742 : 5 : iov[58].iov_base = (void *)0xFEED3C00000;
1743 : 5 : iov[58].iov_len = 4096;
1744 : 5 : iov[59].iov_base = (void *)0xFEED3D00000;
1745 : 5 : iov[59].iov_len = 4096;
1746 : 5 : iov[60].iov_base = (void *)0xFEED3E00000;
1747 : 5 : iov[60].iov_len = 352;
1748 : :
1749 : : /* The 1st child IO must be from iov[0] to iov[31] split by the capacity
1750 : : * of child iovs,
1751 : : */
1752 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
1753 : 5 : expected_io->md_buf = md_buf;
1754 [ + + ]: 165 : for (i = 0; i < 32; i++) {
1755 : 160 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1756 : : }
1757 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1758 : :
1759 : : /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33]
1760 : : * split by the IO boundary requirement.
1761 : : */
1762 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
1763 : 5 : expected_io->md_buf = md_buf + 126 * 8;
1764 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
1765 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
1766 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1767 : :
1768 : : /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
1769 : : * the first 864 bytes of iov[46] split by the IO boundary requirement.
1770 : : */
1771 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
1772 : 5 : expected_io->md_buf = md_buf + 128 * 8;
1773 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
1774 : 5 : iov[33].iov_len - 864);
1775 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
1776 : 5 : ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
1777 : 5 : ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
1778 : 5 : ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
1779 : 5 : ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
1780 : 5 : ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
1781 : 5 : ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
1782 : 5 : ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
1783 : 5 : ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
1784 : 5 : ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
1785 : 5 : ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
1786 : 5 : ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
1787 : 5 : ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
1788 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1789 : :
1790 : : /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
1791 : : * first 864 bytes of iov[52] split by the IO boundary requirement.
1792 : : */
1793 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
1794 : 5 : expected_io->md_buf = md_buf + 256 * 8;
1795 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
1796 : 5 : iov[46].iov_len - 864);
1797 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
1798 : 5 : ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
1799 : 5 : ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
1800 : 5 : ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
1801 : 5 : ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
1802 : 5 : ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
1803 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1804 : :
1805 : : /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
1806 : : * the first 4096 bytes of iov[57] split by the IO boundary requirement.
1807 : : */
1808 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
1809 : 5 : expected_io->md_buf = md_buf + 384 * 8;
1810 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
1811 : 5 : iov[52].iov_len - 864);
1812 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
1813 : 5 : ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
1814 : 5 : ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
1815 : 5 : ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
1816 : 5 : ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
1817 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1818 : :
1819 : : /* The 6th child IO must be from the remaining 7328 bytes of iov[57]
1820 : : * to the first 3936 bytes of iov[58] split by the capacity of child iovs.
1821 : : */
1822 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
1823 : 5 : expected_io->md_buf = md_buf + 512 * 8;
1824 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
1825 : 5 : iov[57].iov_len - 4960);
1826 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
1827 : 5 : ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
1828 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1829 : :
1830 : : /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
1831 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
1832 : 5 : expected_io->md_buf = md_buf + 542 * 8;
1833 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
1834 : 5 : iov[59].iov_len - 3936);
1835 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
1836 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1837 : :
1838 : 5 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
1839 : : 0, 543, io_done, NULL);
1840 : 5 : CU_ASSERT(rc == 0);
1841 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1842 : :
1843 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1844 : 5 : stub_complete_io(1);
1845 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1846 : :
1847 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1848 : 5 : stub_complete_io(5);
1849 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1850 : :
1851 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1852 : 5 : stub_complete_io(1);
1853 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1854 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1855 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1856 : :
1857 : : /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
1858 : : * split, so test that.
1859 : : */
1860 : 5 : bdev->optimal_io_boundary = 15;
1861 : 5 : g_io_done = false;
1862 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1863 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1864 : :
1865 : 5 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1866 : 5 : CU_ASSERT(rc == 0);
1867 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1868 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1869 : 5 : stub_complete_io(1);
1870 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1871 : :
1872 : : /* Test an UNMAP. This should also not be split. */
1873 : 5 : bdev->optimal_io_boundary = 16;
1874 : 5 : g_io_done = false;
1875 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1876 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1877 : :
1878 : 5 : rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1879 : 5 : CU_ASSERT(rc == 0);
1880 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1881 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1882 : 5 : stub_complete_io(1);
1883 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1884 : :
1885 : : /* Test a FLUSH. This should also not be split. */
1886 : 5 : bdev->optimal_io_boundary = 16;
1887 : 5 : g_io_done = false;
1888 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1889 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1890 : :
1891 : 5 : rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1892 : 5 : CU_ASSERT(rc == 0);
1893 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1894 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1895 : 5 : stub_complete_io(1);
1896 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1897 : :
1898 : : /* Test a COPY. This should also not be split. */
1899 : 5 : bdev->optimal_io_boundary = 15;
1900 : 5 : g_io_done = false;
1901 : 5 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
1902 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1903 : :
1904 : 5 : rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
1905 : 5 : CU_ASSERT(rc == 0);
1906 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1907 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1908 : 5 : stub_complete_io(1);
1909 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1910 : :
1911 : 5 : CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1912 : :
1913 : : /* Children requests return an error status */
1914 : 5 : bdev->optimal_io_boundary = 16;
1915 : 5 : iov[0].iov_base = (void *)0x10000;
1916 : 5 : iov[0].iov_len = 512 * 64;
1917 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1918 : 5 : g_io_done = false;
1919 : 5 : g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1920 : :
1921 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
1922 : 5 : CU_ASSERT(rc == 0);
1923 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1924 : 5 : stub_complete_io(4);
1925 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1926 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1927 : 5 : stub_complete_io(1);
1928 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1929 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1930 : :
1931 : : /* Test if a multi vector command terminated with failure before continuing
1932 : : * splitting process when one of child I/O failed.
1933 : : * The multi vector command is as same as the above that needs to be split by strip
1934 : : * and then needs to be split further due to the capacity of child iovs.
1935 : : */
1936 [ + + ]: 160 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1937 : 155 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1938 : 155 : iov[i].iov_len = 512;
1939 : : }
1940 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1941 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1942 : :
1943 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1944 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1945 : :
1946 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1947 : 5 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1948 : :
1949 : 5 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1950 : :
1951 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1952 : 5 : g_io_done = false;
1953 : 5 : g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1954 : :
1955 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
1956 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1957 : 5 : CU_ASSERT(rc == 0);
1958 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
1959 : :
1960 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1961 : 5 : stub_complete_io(1);
1962 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
1963 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1964 : :
1965 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1966 : :
1967 : : /* for this test we will create the following conditions to hit the code path where
1968 : : * we are trying to send an IO following a split that has no iovs because we had to
1969 : : * trim them for alignment reasons.
1970 : : *
1971 : : * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
1972 : : * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
1973 : : * position 30 and overshoot by 0x2e.
1974 : : * - That means we'll send the IO and loop back to pick up the remaining bytes at
1975 : : * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e
1976 : : * which eliminates that vector so we just send the first split IO with 30 vectors
1977 : : * and let the completion pick up the last 2 vectors.
1978 : : */
1979 : 5 : bdev->optimal_io_boundary = 32;
1980 : 5 : bdev->split_on_optimal_io_boundary = true;
1981 : 5 : g_io_done = false;
1982 : :
1983 : : /* Init all parent IOVs to 0x212 */
1984 [ + + ]: 175 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
1985 : 170 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1986 : 170 : iov[i].iov_len = 0x212;
1987 : : }
1988 : :
1989 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
1990 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
1991 : : /* expect 0-29 to be 1:1 with the parent iov */
1992 [ + + ]: 155 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1993 : 150 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1994 : : }
1995 : :
1996 : : /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment
1997 : : * where 0x2e is the amount we overshot the 16K boundary
1998 : : */
1999 : 5 : ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2000 : : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
2001 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2002 : :
2003 : : /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
2004 : : * shortened that take it to the next boundary and then a final one to get us to
2005 : : * 0x4200 bytes for the IO.
2006 : : */
2007 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
2008 : : SPDK_BDEV_IO_NUM_CHILD_IOV, 2);
2009 : : /* position 30 picked up the remaining bytes to the next boundary */
2010 : 5 : ut_expected_io_set_iov(expected_io, 0,
2011 : 5 : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
2012 : :
2013 : : /* position 31 picked up the rest of the transfer to get us to 0x4200 */
2014 : 5 : ut_expected_io_set_iov(expected_io, 1,
2015 : : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
2016 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2017 : :
2018 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0,
2019 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2020 : 5 : CU_ASSERT(rc == 0);
2021 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2022 : :
2023 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2024 : 5 : stub_complete_io(1);
2025 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2026 : :
2027 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2028 : 5 : stub_complete_io(1);
2029 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2030 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2031 : :
2032 : 5 : spdk_put_io_channel(io_ch);
2033 : 5 : spdk_bdev_close(desc);
2034 : 5 : free_bdev(bdev);
2035 : 5 : ut_fini_bdev();
2036 : 5 : }
2037 : :
2038 : : static void
2039 : 5 : bdev_io_max_size_and_segment_split_test(void)
2040 : : {
2041 : : struct spdk_bdev *bdev;
2042 : 5 : struct spdk_bdev_desc *desc = NULL;
2043 : : struct spdk_io_channel *io_ch;
2044 : 5 : struct spdk_bdev_opts bdev_opts = {};
2045 : 4 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2046 : : struct ut_expected_io *expected_io;
2047 : : uint64_t i;
2048 : : int rc;
2049 : :
2050 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2051 : 5 : bdev_opts.bdev_io_pool_size = 512;
2052 : 5 : bdev_opts.bdev_io_cache_size = 64;
2053 : 5 : bdev_opts.opts_size = sizeof(bdev_opts);
2054 : 5 : ut_init_bdev(&bdev_opts);
2055 : :
2056 : 5 : bdev = allocate_bdev("bdev0");
2057 : :
2058 : 5 : rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2059 : 5 : CU_ASSERT(rc == 0);
2060 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
2061 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
2062 : 5 : CU_ASSERT(io_ch != NULL);
2063 : :
2064 : 5 : bdev->split_on_optimal_io_boundary = false;
2065 : 5 : bdev->optimal_io_boundary = 0;
2066 : :
2067 : : /* Case 0 max_num_segments == 0.
2068 : : * but segment size 2 * 512 > 512
2069 : : */
2070 : 5 : bdev->max_segment_size = 512;
2071 : 5 : bdev->max_num_segments = 0;
2072 : 5 : g_io_done = false;
2073 : :
2074 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2075 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2076 : 5 : ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2077 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2078 : :
2079 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2080 : 5 : CU_ASSERT(rc == 0);
2081 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2082 : :
2083 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2084 : 5 : stub_complete_io(1);
2085 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2086 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2087 : :
2088 : : /* Case 1 max_segment_size == 0
2089 : : * but iov num 2 > 1.
2090 : : */
2091 : 5 : bdev->max_segment_size = 0;
2092 : 5 : bdev->max_num_segments = 1;
2093 : 5 : g_io_done = false;
2094 : :
2095 : 5 : iov[0].iov_base = (void *)0x10000;
2096 : 5 : iov[0].iov_len = 512;
2097 : 5 : iov[1].iov_base = (void *)0x20000;
2098 : 5 : iov[1].iov_len = 8 * 512;
2099 : :
2100 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
2101 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len);
2102 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2103 : :
2104 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1);
2105 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len);
2106 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2107 : :
2108 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL);
2109 : 5 : CU_ASSERT(rc == 0);
2110 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2111 : :
2112 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2113 : 5 : stub_complete_io(2);
2114 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2115 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2116 : :
2117 : : /* Test that a non-vector command is split correctly.
2118 : : * Set up the expected values before calling spdk_bdev_read_blocks
2119 : : */
2120 : 5 : bdev->max_segment_size = 512;
2121 : 5 : bdev->max_num_segments = 1;
2122 : 5 : g_io_done = false;
2123 : :
2124 : : /* Child IO 0 */
2125 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
2126 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2127 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2128 : :
2129 : : /* Child IO 1 */
2130 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2131 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512);
2132 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2133 : :
2134 : : /* spdk_bdev_read_blocks will submit the first child immediately. */
2135 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2136 : 5 : CU_ASSERT(rc == 0);
2137 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2138 : :
2139 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2140 : 5 : stub_complete_io(2);
2141 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2142 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2143 : :
2144 : : /* Now set up a more complex, multi-vector command that needs to be split,
2145 : : * including splitting iovecs.
2146 : : */
2147 : 5 : bdev->max_segment_size = 2 * 512;
2148 : 5 : bdev->max_num_segments = 1;
2149 : 5 : g_io_done = false;
2150 : :
2151 : 5 : iov[0].iov_base = (void *)0x10000;
2152 : 5 : iov[0].iov_len = 2 * 512;
2153 : 5 : iov[1].iov_base = (void *)0x20000;
2154 : 5 : iov[1].iov_len = 4 * 512;
2155 : 5 : iov[2].iov_base = (void *)0x30000;
2156 : 5 : iov[2].iov_len = 6 * 512;
2157 : :
2158 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2159 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
2160 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2161 : :
2162 : : /* Split iov[1].size to 2 iov entries then split the segments */
2163 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2164 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
2165 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2166 : :
2167 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
2168 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
2169 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2170 : :
2171 : : /* Split iov[2].size to 3 iov entries then split the segments */
2172 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
2173 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
2174 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2175 : :
2176 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
2177 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
2178 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2179 : :
2180 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
2181 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
2182 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2183 : :
2184 : 5 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
2185 : 5 : CU_ASSERT(rc == 0);
2186 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2187 : :
2188 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
2189 : 5 : stub_complete_io(6);
2190 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2191 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2192 : :
2193 : : /* Test multi vector command that needs to be split by strip and then needs to be
2194 : : * split further due to the capacity of parent IO child iovs.
2195 : : */
2196 : 5 : bdev->max_segment_size = 512;
2197 : 5 : bdev->max_num_segments = 1;
2198 : 5 : g_io_done = false;
2199 : :
2200 [ + + ]: 165 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2201 : 160 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2202 : 160 : iov[i].iov_len = 512 * 2;
2203 : : }
2204 : :
2205 : : /* Each input iov.size is split into 2 iovs,
2206 : : * half of the input iov can fill all child iov entries of a single IO.
2207 : : */
2208 [ + + ]: 85 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) {
2209 : 80 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
2210 : 80 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2211 : 80 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2212 : :
2213 : 80 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
2214 : 80 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2215 : 80 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2216 : : }
2217 : :
2218 : : /* The remaining iov is split in the second round */
2219 [ + + ]: 85 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2220 : 80 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
2221 : 80 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2222 : 80 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2223 : :
2224 : 80 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
2225 : 80 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2226 : 80 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2227 : : }
2228 : :
2229 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2230 : : SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
2231 : 5 : CU_ASSERT(rc == 0);
2232 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2233 : :
2234 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2235 : 5 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2236 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2237 : :
2238 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2239 : 5 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2240 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2241 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2242 : :
2243 : : /* A wrong case, a child IO that is divided does
2244 : : * not meet the principle of multiples of block size,
2245 : : * and exits with error
2246 : : */
2247 : 5 : bdev->max_segment_size = 512;
2248 : 5 : bdev->max_num_segments = 1;
2249 : 5 : g_io_done = false;
2250 : :
2251 : 5 : iov[0].iov_base = (void *)0x10000;
2252 : 5 : iov[0].iov_len = 512 + 256;
2253 : 5 : iov[1].iov_base = (void *)0x20000;
2254 : 5 : iov[1].iov_len = 256;
2255 : :
2256 : : /* iov[0] is split to 512 and 256.
2257 : : * 256 is less than a block size, and it is found
2258 : : * in the next round of split that it is the first child IO smaller than
2259 : : * the block size, so the error exit
2260 : : */
2261 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
2262 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
2263 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2264 : :
2265 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
2266 : 5 : CU_ASSERT(rc == 0);
2267 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2268 : :
2269 : : /* First child IO is OK */
2270 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2271 : 5 : stub_complete_io(1);
2272 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2273 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2274 : :
2275 : : /* error exit */
2276 : 5 : stub_complete_io(1);
2277 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2278 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
2279 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2280 : :
2281 : : /* Test multi vector command that needs to be split by strip and then needs to be
2282 : : * split further due to the capacity of child iovs.
2283 : : *
2284 : : * In this case, the last two iovs need to be split, but it will exceed the capacity
2285 : : * of child iovs, so it needs to wait until the first batch completed.
2286 : : */
2287 : 5 : bdev->max_segment_size = 512;
2288 : 5 : bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2289 : 5 : g_io_done = false;
2290 : :
2291 [ + + ]: 155 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2292 : 150 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2293 : 150 : iov[i].iov_len = 512;
2294 : : }
2295 [ + + ]: 15 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2296 : 10 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2297 : 10 : iov[i].iov_len = 512 * 2;
2298 : : }
2299 : :
2300 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2301 : : SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
2302 : : /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */
2303 [ + + ]: 155 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2304 : 150 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2305 : : }
2306 : : /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */
2307 : 5 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
2308 : 5 : ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
2309 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2310 : :
2311 : : /* Child iov entries exceed the max num of parent IO so split it in next round */
2312 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2);
2313 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
2314 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
2315 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2316 : :
2317 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2318 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
2319 : 5 : CU_ASSERT(rc == 0);
2320 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2321 : :
2322 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2323 : 5 : stub_complete_io(1);
2324 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2325 : :
2326 : : /* Next round */
2327 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2328 : 5 : stub_complete_io(1);
2329 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2330 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2331 : :
2332 : : /* This case is similar to the previous one, but the io composed of
2333 : : * the last few entries of child iov is not enough for a blocklen, so they
2334 : : * cannot be put into this IO, but wait until the next time.
2335 : : */
2336 : 5 : bdev->max_segment_size = 512;
2337 : 5 : bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2338 : 5 : g_io_done = false;
2339 : :
2340 [ + + ]: 155 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2341 : 150 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2342 : 150 : iov[i].iov_len = 512;
2343 : : }
2344 : :
2345 [ + + ]: 25 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2346 : 20 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2347 : 20 : iov[i].iov_len = 128;
2348 : : }
2349 : :
2350 : : /* First child iovcnt is't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2.
2351 : : * Because the left 2 iov is not enough for a blocklen.
2352 : : */
2353 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2354 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2);
2355 [ + + ]: 155 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2356 : 150 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2357 : : }
2358 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2359 : :
2360 : : /* The second child io waits until the end of the first child io before executing.
2361 : : * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO.
2362 : : * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2
2363 : : */
2364 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2365 : : 1, 4);
2366 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
2367 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
2368 : 5 : ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
2369 : 5 : ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
2370 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2371 : :
2372 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2373 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
2374 : 5 : CU_ASSERT(rc == 0);
2375 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2376 : :
2377 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2378 : 5 : stub_complete_io(1);
2379 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2380 : :
2381 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2382 : 5 : stub_complete_io(1);
2383 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2384 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2385 : :
2386 : : /* A very complicated case. Each sg entry exceeds max_segment_size and
2387 : : * needs to be split. At the same time, child io must be a multiple of blocklen.
2388 : : * At the same time, child iovcnt exceeds parent iovcnt.
2389 : : */
2390 : 5 : bdev->max_segment_size = 512 + 128;
2391 : 5 : bdev->max_num_segments = 3;
2392 : 5 : g_io_done = false;
2393 : :
2394 [ + + ]: 155 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2395 : 150 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2396 : 150 : iov[i].iov_len = 512 + 256;
2397 : : }
2398 : :
2399 [ + + ]: 25 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2400 : 20 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2401 : 20 : iov[i].iov_len = 512 + 128;
2402 : : }
2403 : :
2404 : : /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries.
2405 : : * Consume 4 parent IO iov entries per for() round and 6 block size.
2406 : : * Generate 9 child IOs.
2407 : : */
2408 [ + + ]: 20 : for (i = 0; i < 3; i++) {
2409 : 15 : uint32_t j = i * 4;
2410 : 15 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
2411 : 15 : ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2412 : 15 : ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2413 : 15 : ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2414 : 15 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2415 : :
2416 : : /* Child io must be a multiple of blocklen
2417 : : * iov[j + 2] must be split. If the third entry is also added,
2418 : : * the multiple of blocklen cannot be guaranteed. But it still
2419 : : * occupies one iov entry of the parent child iov.
2420 : : */
2421 : 15 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
2422 : 15 : ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2423 : 15 : ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2424 : 15 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2425 : :
2426 : 15 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
2427 : 15 : ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2428 : 15 : ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2429 : 15 : ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2430 : 15 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2431 : : }
2432 : :
2433 : : /* Child iov position at 27, the 10th child IO
2434 : : * iov entry index is 3 * 4 and offset is 3 * 6
2435 : : */
2436 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
2437 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
2438 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
2439 : 5 : ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
2440 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2441 : :
2442 : : /* Child iov position at 30, the 11th child IO */
2443 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
2444 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
2445 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
2446 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2447 : :
2448 : : /* The 2nd split round and iovpos is 0, the 12th child IO */
2449 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
2450 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
2451 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
2452 : 5 : ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
2453 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2454 : :
2455 : : /* Consume 9 child IOs and 27 child iov entries.
2456 : : * Consume 4 parent IO iov entries per for() round and 6 block size.
2457 : : * Parent IO iov index start from 16 and block offset start from 24
2458 : : */
2459 [ + + ]: 20 : for (i = 0; i < 3; i++) {
2460 : 15 : uint32_t j = i * 4 + 16;
2461 : 15 : uint32_t offset = i * 6 + 24;
2462 : 15 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
2463 : 15 : ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2464 : 15 : ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2465 : 15 : ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2466 : 15 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2467 : :
2468 : : /* Child io must be a multiple of blocklen
2469 : : * iov[j + 2] must be split. If the third entry is also added,
2470 : : * the multiple of blocklen cannot be guaranteed. But it still
2471 : : * occupies one iov entry of the parent child iov.
2472 : : */
2473 : 15 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
2474 : 15 : ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2475 : 15 : ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2476 : 15 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2477 : :
2478 : 15 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
2479 : 15 : ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2480 : 15 : ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2481 : 15 : ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2482 : 15 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2483 : : }
2484 : :
2485 : : /* The 22th child IO, child iov position at 30 */
2486 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
2487 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
2488 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2489 : :
2490 : : /* The third round */
2491 : : /* Here is the 23nd child IO and child iovpos is 0 */
2492 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
2493 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
2494 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
2495 : 5 : ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
2496 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2497 : :
2498 : : /* The 24th child IO */
2499 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
2500 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
2501 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
2502 : 5 : ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
2503 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2504 : :
2505 : : /* The 25th child IO */
2506 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
2507 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
2508 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
2509 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2510 : :
2511 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2512 : : 50, io_done, NULL);
2513 : 5 : CU_ASSERT(rc == 0);
2514 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2515 : :
2516 : : /* Parent IO supports up to 32 child iovs, so it is calculated that
2517 : : * a maximum of 11 IOs can be split at a time, and the
2518 : : * splitting will continue after the first batch is over.
2519 : : */
2520 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2521 : 5 : stub_complete_io(11);
2522 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2523 : :
2524 : : /* The 2nd round */
2525 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2526 : 5 : stub_complete_io(11);
2527 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2528 : :
2529 : : /* The last round */
2530 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2531 : 5 : stub_complete_io(3);
2532 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2533 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2534 : :
2535 : : /* Test an WRITE_ZEROES. This should also not be split. */
2536 : 5 : bdev->max_segment_size = 512;
2537 : 5 : bdev->max_num_segments = 1;
2538 : 5 : g_io_done = false;
2539 : :
2540 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
2541 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2542 : :
2543 : 5 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
2544 : 5 : CU_ASSERT(rc == 0);
2545 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2546 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2547 : 5 : stub_complete_io(1);
2548 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2549 : :
2550 : : /* Test an UNMAP. This should also not be split. */
2551 : 5 : g_io_done = false;
2552 : :
2553 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
2554 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2555 : :
2556 : 5 : rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
2557 : 5 : CU_ASSERT(rc == 0);
2558 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2559 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2560 : 5 : stub_complete_io(1);
2561 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2562 : :
2563 : : /* Test a FLUSH. This should also not be split. */
2564 : 5 : g_io_done = false;
2565 : :
2566 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
2567 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2568 : :
2569 : 5 : rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
2570 : 5 : CU_ASSERT(rc == 0);
2571 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2572 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2573 : 5 : stub_complete_io(1);
2574 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2575 : :
2576 : : /* Test a COPY. This should also not be split. */
2577 : 5 : g_io_done = false;
2578 : :
2579 : 5 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
2580 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2581 : :
2582 : 5 : rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
2583 : 5 : CU_ASSERT(rc == 0);
2584 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2585 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2586 : 5 : stub_complete_io(1);
2587 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2588 : :
2589 : : /* Test that IOs are split on max_rw_size */
2590 : 5 : bdev->max_rw_size = 2;
2591 : 5 : bdev->max_segment_size = 0;
2592 : 5 : bdev->max_num_segments = 0;
2593 : 5 : g_io_done = false;
2594 : :
2595 : : /* 5 blocks in a contiguous buffer */
2596 : 5 : iov[0].iov_base = (void *)0x10000;
2597 : 5 : iov[0].iov_len = 5 * 512;
2598 : :
2599 : : /* First: offset=0, num_blocks=2 */
2600 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
2601 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
2602 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2603 : : /* Second: offset=2, num_blocks=2 */
2604 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 2, 1);
2605 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 2 * 512);
2606 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2607 : : /* Third: offset=4, num_blocks=1 */
2608 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
2609 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 4 * 512, 512);
2610 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2611 : :
2612 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 5, io_done, NULL);
2613 : 5 : CU_ASSERT(rc == 0);
2614 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2615 : :
2616 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2617 : 5 : stub_complete_io(3);
2618 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2619 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2620 : :
2621 : : /* Check splitting on both max_rw_size + max_num_segments */
2622 : 5 : bdev->max_rw_size = 2;
2623 : 5 : bdev->max_num_segments = 2;
2624 : 5 : bdev->max_segment_size = 0;
2625 : 5 : g_io_done = false;
2626 : :
2627 : : /* 5 blocks split across 4 iovs */
2628 : 5 : iov[0].iov_base = (void *)0x10000;
2629 : 5 : iov[0].iov_len = 3 * 512;
2630 : 5 : iov[1].iov_base = (void *)0x20000;
2631 : 5 : iov[1].iov_len = 256;
2632 : 5 : iov[2].iov_base = (void *)0x30000;
2633 : 5 : iov[2].iov_len = 256;
2634 : 5 : iov[3].iov_base = (void *)0x40000;
2635 : 5 : iov[3].iov_len = 512;
2636 : :
2637 : : /* First: offset=0, num_blocks=2, iovcnt=1 */
2638 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
2639 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
2640 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2641 : : /* Second: offset=2, num_blocks=1, iovcnt=1 (max_segment_size prevents from submitting
2642 : : * the rest of iov[0], and iov[1]+iov[2])
2643 : : */
2644 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 1, 1);
2645 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 512);
2646 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2647 : : /* Third: offset=3, num_blocks=1, iovcnt=2 (iov[1]+iov[2]) */
2648 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 3, 1, 2);
2649 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0x20000, 256);
2650 : 5 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 256);
2651 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2652 : : /* Fourth: offset=4, num_blocks=1, iovcnt=1 (iov[3]) */
2653 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
2654 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0x40000, 512);
2655 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2656 : :
2657 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 4, 0, 5, io_done, NULL);
2658 : 5 : CU_ASSERT(rc == 0);
2659 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2660 : :
2661 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
2662 : 5 : stub_complete_io(4);
2663 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2664 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2665 : :
2666 : : /* Check splitting on both max_rw_size + max_segment_size */
2667 : 5 : bdev->max_rw_size = 2;
2668 : 5 : bdev->max_segment_size = 512;
2669 : 5 : bdev->max_num_segments = 0;
2670 : 5 : g_io_done = false;
2671 : :
2672 : : /* 6 blocks in a contiguous buffer */
2673 : 5 : iov[0].iov_base = (void *)0x10000;
2674 : 5 : iov[0].iov_len = 6 * 512;
2675 : :
2676 : : /* We expect 3 IOs each with 2 blocks and 2 iovs */
2677 [ + + ]: 20 : for (i = 0; i < 3; ++i) {
2678 : 15 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 2, 2);
2679 : 15 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 2 * 512, 512);
2680 : 15 : ut_expected_io_set_iov(expected_io, 1, (void *)0x10000 + i * 2 * 512 + 512, 512);
2681 : 15 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2682 : : }
2683 : :
2684 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 6, io_done, NULL);
2685 : 5 : CU_ASSERT(rc == 0);
2686 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2687 : :
2688 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2689 : 5 : stub_complete_io(3);
2690 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2691 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2692 : :
2693 : : /* Check splitting on max_rw_size limited by SPDK_BDEV_IO_NUM_CHILD_IOV */
2694 : 5 : bdev->max_rw_size = 1;
2695 : 5 : bdev->max_segment_size = 0;
2696 : 5 : bdev->max_num_segments = 0;
2697 : 5 : g_io_done = false;
2698 : :
2699 : : /* SPDK_BDEV_IO_NUM_CHILD_IOV + 1 blocks */
2700 : 5 : iov[0].iov_base = (void *)0x10000;
2701 : 5 : iov[0].iov_len = (SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 512;
2702 : :
2703 : : /* We expect SPDK_BDEV_IO_NUM_CHILD_IOV + 1 IOs each with a single iov */
2704 [ + + ]: 20 : for (i = 0; i < 3; ++i) {
2705 : 15 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i, 1, 1);
2706 : 15 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 512, 512);
2707 : 15 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2708 : : }
2709 : :
2710 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2711 : 5 : CU_ASSERT(rc == 0);
2712 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2713 : :
2714 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2715 : 5 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2716 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2717 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2718 : 5 : stub_complete_io(1);
2719 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2720 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2721 : :
2722 : 5 : spdk_put_io_channel(io_ch);
2723 : 5 : spdk_bdev_close(desc);
2724 : 5 : free_bdev(bdev);
2725 : 5 : ut_fini_bdev();
2726 : 5 : }
2727 : :
2728 : : static void
2729 : 5 : bdev_io_mix_split_test(void)
2730 : : {
2731 : : struct spdk_bdev *bdev;
2732 : 5 : struct spdk_bdev_desc *desc = NULL;
2733 : : struct spdk_io_channel *io_ch;
2734 : 5 : struct spdk_bdev_opts bdev_opts = {};
2735 : 4 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2736 : : struct ut_expected_io *expected_io;
2737 : : uint64_t i;
2738 : : int rc;
2739 : :
2740 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2741 : 5 : bdev_opts.bdev_io_pool_size = 512;
2742 : 5 : bdev_opts.bdev_io_cache_size = 64;
2743 : 5 : ut_init_bdev(&bdev_opts);
2744 : :
2745 : 5 : bdev = allocate_bdev("bdev0");
2746 : :
2747 : 5 : rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2748 : 5 : CU_ASSERT(rc == 0);
2749 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
2750 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
2751 : 5 : CU_ASSERT(io_ch != NULL);
2752 : :
2753 : : /* First case optimal_io_boundary == max_segment_size * max_num_segments */
2754 : 5 : bdev->split_on_optimal_io_boundary = true;
2755 : 5 : bdev->optimal_io_boundary = 16;
2756 : :
2757 : 5 : bdev->max_segment_size = 512;
2758 : 5 : bdev->max_num_segments = 16;
2759 : 5 : g_io_done = false;
2760 : :
2761 : : /* IO crossing the IO boundary requires split
2762 : : * Total 2 child IOs.
2763 : : */
2764 : :
2765 : : /* The 1st child IO split the segment_size to multiple segment entry */
2766 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2767 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2768 : 5 : ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2769 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2770 : :
2771 : : /* The 2nd child IO split the segment_size to multiple segment entry */
2772 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
2773 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
2774 : 5 : ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
2775 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2776 : :
2777 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
2778 : 5 : CU_ASSERT(rc == 0);
2779 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2780 : :
2781 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2782 : 5 : stub_complete_io(2);
2783 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2784 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2785 : :
2786 : : /* Second case optimal_io_boundary > max_segment_size * max_num_segments */
2787 : 5 : bdev->max_segment_size = 15 * 512;
2788 : 5 : bdev->max_num_segments = 1;
2789 : 5 : g_io_done = false;
2790 : :
2791 : : /* IO crossing the IO boundary requires split.
2792 : : * The 1st child IO segment size exceeds the max_segment_size,
2793 : : * So 1st child IO will be split to multiple segment entry.
2794 : : * Then it split to 2 child IOs because of the max_num_segments.
2795 : : * Total 3 child IOs.
2796 : : */
2797 : :
2798 : : /* The first 2 IOs are in an IO boundary.
2799 : : * Because the optimal_io_boundary > max_segment_size * max_num_segments
2800 : : * So it split to the first 2 IOs.
2801 : : */
2802 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
2803 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
2804 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2805 : :
2806 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2807 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
2808 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2809 : :
2810 : : /* The 3rd Child IO is because of the io boundary */
2811 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2812 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2813 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2814 : :
2815 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2816 : 5 : CU_ASSERT(rc == 0);
2817 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2818 : :
2819 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2820 : 5 : stub_complete_io(3);
2821 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2822 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2823 : :
2824 : : /* Third case optimal_io_boundary < max_segment_size * max_num_segments */
2825 : 5 : bdev->max_segment_size = 17 * 512;
2826 : 5 : bdev->max_num_segments = 1;
2827 : 5 : g_io_done = false;
2828 : :
2829 : : /* IO crossing the IO boundary requires split.
2830 : : * Child IO does not split.
2831 : : * Total 2 child IOs.
2832 : : */
2833 : :
2834 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
2835 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
2836 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2837 : :
2838 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2839 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2840 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2841 : :
2842 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2843 : 5 : CU_ASSERT(rc == 0);
2844 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2845 : :
2846 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2847 : 5 : stub_complete_io(2);
2848 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2849 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2850 : :
2851 : : /* Now set up a more complex, multi-vector command that needs to be split,
2852 : : * including splitting iovecs.
2853 : : * optimal_io_boundary < max_segment_size * max_num_segments
2854 : : */
2855 : 5 : bdev->max_segment_size = 3 * 512;
2856 : 5 : bdev->max_num_segments = 6;
2857 : 5 : g_io_done = false;
2858 : :
2859 : 5 : iov[0].iov_base = (void *)0x10000;
2860 : 5 : iov[0].iov_len = 4 * 512;
2861 : 5 : iov[1].iov_base = (void *)0x20000;
2862 : 5 : iov[1].iov_len = 4 * 512;
2863 : 5 : iov[2].iov_base = (void *)0x30000;
2864 : 5 : iov[2].iov_len = 10 * 512;
2865 : :
2866 : : /* IO crossing the IO boundary requires split.
2867 : : * The 1st child IO segment size exceeds the max_segment_size and after
2868 : : * splitting segment_size, the num_segments exceeds max_num_segments.
2869 : : * So 1st child IO will be split to 2 child IOs.
2870 : : * Total 3 child IOs.
2871 : : */
2872 : :
2873 : : /* The first 2 IOs are in an IO boundary.
2874 : : * After splitting segment size the segment num exceeds.
2875 : : * So it splits to 2 child IOs.
2876 : : */
2877 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
2878 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
2879 : 5 : ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
2880 : 5 : ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
2881 : 5 : ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
2882 : 5 : ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
2883 : 5 : ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
2884 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2885 : :
2886 : : /* The 2nd child IO has the left segment entry */
2887 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2888 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
2889 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2890 : :
2891 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2892 : 5 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
2893 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2894 : :
2895 : 5 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
2896 : 5 : CU_ASSERT(rc == 0);
2897 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2898 : :
2899 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2900 : 5 : stub_complete_io(3);
2901 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2902 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2903 : :
2904 : : /* A very complicated case. Each sg entry exceeds max_segment_size
2905 : : * and split on io boundary.
2906 : : * optimal_io_boundary < max_segment_size * max_num_segments
2907 : : */
2908 : 5 : bdev->max_segment_size = 3 * 512;
2909 : 5 : bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2910 : 5 : g_io_done = false;
2911 : :
2912 [ + + ]: 105 : for (i = 0; i < 20; i++) {
2913 : 100 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2914 : 100 : iov[i].iov_len = 512 * 4;
2915 : : }
2916 : :
2917 : : /* IO crossing the IO boundary requires split.
2918 : : * 80 block length can split 5 child IOs base on offset and IO boundary.
2919 : : * Each iov entry needs to be split to 2 entries because of max_segment_size
2920 : : * Total 5 child IOs.
2921 : : */
2922 : :
2923 : : /* 4 iov entries are in an IO boundary and each iov entry splits to 2.
2924 : : * So each child IO occupies 8 child iov entries.
2925 : : */
2926 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
2927 [ + + ]: 25 : for (i = 0; i < 4; i++) {
2928 : 20 : int iovcnt = i * 2;
2929 : 20 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2930 : 20 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2931 : : }
2932 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2933 : :
2934 : : /* 2nd child IO and total 16 child iov entries of parent IO */
2935 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
2936 [ + + ]: 25 : for (i = 4; i < 8; i++) {
2937 : 20 : int iovcnt = (i - 4) * 2;
2938 : 20 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2939 : 20 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2940 : : }
2941 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2942 : :
2943 : : /* 3rd child IO and total 24 child iov entries of parent IO */
2944 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
2945 [ + + ]: 25 : for (i = 8; i < 12; i++) {
2946 : 20 : int iovcnt = (i - 8) * 2;
2947 : 20 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2948 : 20 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2949 : : }
2950 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2951 : :
2952 : : /* 4th child IO and total 32 child iov entries of parent IO */
2953 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
2954 [ + + ]: 25 : for (i = 12; i < 16; i++) {
2955 : 20 : int iovcnt = (i - 12) * 2;
2956 : 20 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2957 : 20 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2958 : : }
2959 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2960 : :
2961 : : /* 5th child IO and because of the child iov entry it should be split
2962 : : * in next round.
2963 : : */
2964 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8);
2965 [ + + ]: 25 : for (i = 16; i < 20; i++) {
2966 : 20 : int iovcnt = (i - 16) * 2;
2967 : 20 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2968 : 20 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2969 : : }
2970 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2971 : :
2972 : 5 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL);
2973 : 5 : CU_ASSERT(rc == 0);
2974 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2975 : :
2976 : : /* First split round */
2977 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
2978 : 5 : stub_complete_io(4);
2979 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
2980 : :
2981 : : /* Second split round */
2982 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2983 : 5 : stub_complete_io(1);
2984 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
2985 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2986 : :
2987 : 5 : spdk_put_io_channel(io_ch);
2988 : 5 : spdk_bdev_close(desc);
2989 : 5 : free_bdev(bdev);
2990 : 5 : ut_fini_bdev();
2991 : 5 : }
2992 : :
2993 : : static void
2994 : 5 : bdev_io_split_with_io_wait(void)
2995 : : {
2996 : : struct spdk_bdev *bdev;
2997 : 5 : struct spdk_bdev_desc *desc = NULL;
2998 : : struct spdk_io_channel *io_ch;
2999 : : struct spdk_bdev_channel *channel;
3000 : : struct spdk_bdev_mgmt_channel *mgmt_ch;
3001 : 5 : struct spdk_bdev_opts bdev_opts = {};
3002 : 4 : struct iovec iov[3];
3003 : : struct ut_expected_io *expected_io;
3004 : : int rc;
3005 : :
3006 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3007 : 5 : bdev_opts.bdev_io_pool_size = 2;
3008 : 5 : bdev_opts.bdev_io_cache_size = 1;
3009 : 5 : ut_init_bdev(&bdev_opts);
3010 : :
3011 : 5 : bdev = allocate_bdev("bdev0");
3012 : :
3013 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
3014 : 5 : CU_ASSERT(rc == 0);
3015 : 5 : CU_ASSERT(desc != NULL);
3016 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3017 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
3018 : 5 : CU_ASSERT(io_ch != NULL);
3019 : 5 : channel = spdk_io_channel_get_ctx(io_ch);
3020 : 5 : mgmt_ch = channel->shared_resource->mgmt_ch;
3021 : :
3022 : 5 : bdev->optimal_io_boundary = 16;
3023 : 5 : bdev->split_on_optimal_io_boundary = true;
3024 : :
3025 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
3026 : 5 : CU_ASSERT(rc == 0);
3027 : :
3028 : : /* Now test that a single-vector command is split correctly.
3029 : : * Offset 14, length 8, payload 0xF000
3030 : : * Child - Offset 14, length 2, payload 0xF000
3031 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
3032 : : *
3033 : : * Set up the expected values before calling spdk_bdev_read_blocks
3034 : : */
3035 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
3036 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
3037 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3038 : :
3039 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
3040 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
3041 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3042 : :
3043 : : /* The following children will be submitted sequentially due to the capacity of
3044 : : * spdk_bdev_io.
3045 : : */
3046 : :
3047 : : /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
3048 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
3049 : 5 : CU_ASSERT(rc == 0);
3050 : 5 : CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
3051 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3052 : :
3053 : : /* Completing the first read I/O will submit the first child */
3054 : 5 : stub_complete_io(1);
3055 : 5 : CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
3056 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3057 : :
3058 : : /* Completing the first child will submit the second child */
3059 : 5 : stub_complete_io(1);
3060 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3061 : :
3062 : : /* Complete the second child I/O. This should result in our callback getting
3063 : : * invoked since the parent I/O is now complete.
3064 : : */
3065 : 5 : stub_complete_io(1);
3066 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3067 : :
3068 : : /* Now set up a more complex, multi-vector command that needs to be split,
3069 : : * including splitting iovecs.
3070 : : */
3071 : 5 : iov[0].iov_base = (void *)0x10000;
3072 : 5 : iov[0].iov_len = 512;
3073 : 5 : iov[1].iov_base = (void *)0x20000;
3074 : 5 : iov[1].iov_len = 20 * 512;
3075 : 5 : iov[2].iov_base = (void *)0x30000;
3076 : 5 : iov[2].iov_len = 11 * 512;
3077 : :
3078 : 5 : g_io_done = false;
3079 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
3080 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
3081 : 5 : ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
3082 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3083 : :
3084 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
3085 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
3086 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3087 : :
3088 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
3089 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
3090 : 5 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
3091 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3092 : :
3093 : 5 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
3094 : 5 : CU_ASSERT(rc == 0);
3095 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
3096 : :
3097 : : /* The following children will be submitted sequentially due to the capacity of
3098 : : * spdk_bdev_io.
3099 : : */
3100 : :
3101 : : /* Completing the first child will submit the second child */
3102 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3103 : 5 : stub_complete_io(1);
3104 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
3105 : :
3106 : : /* Completing the second child will submit the third child */
3107 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3108 : 5 : stub_complete_io(1);
3109 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
3110 : :
3111 : : /* Completing the third child will result in our callback getting invoked
3112 : : * since the parent I/O is now complete.
3113 : : */
3114 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3115 : 5 : stub_complete_io(1);
3116 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
3117 : :
3118 : 5 : CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
3119 : :
3120 : 5 : spdk_put_io_channel(io_ch);
3121 : 5 : spdk_bdev_close(desc);
3122 : 5 : free_bdev(bdev);
3123 : 5 : ut_fini_bdev();
3124 : 5 : }
3125 : :
3126 : : static void
3127 : 5 : bdev_io_write_unit_split_test(void)
3128 : : {
3129 : : struct spdk_bdev *bdev;
3130 : 5 : struct spdk_bdev_desc *desc = NULL;
3131 : : struct spdk_io_channel *io_ch;
3132 : 5 : struct spdk_bdev_opts bdev_opts = {};
3133 : 4 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4];
3134 : : struct ut_expected_io *expected_io;
3135 : : uint64_t i;
3136 : : int rc;
3137 : :
3138 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3139 : 5 : bdev_opts.bdev_io_pool_size = 512;
3140 : 5 : bdev_opts.bdev_io_cache_size = 64;
3141 : 5 : ut_init_bdev(&bdev_opts);
3142 : :
3143 : 5 : bdev = allocate_bdev("bdev0");
3144 : :
3145 : 5 : rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
3146 : 5 : CU_ASSERT(rc == 0);
3147 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
3148 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
3149 : 5 : CU_ASSERT(io_ch != NULL);
3150 : :
3151 : : /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */
3152 : 5 : bdev->write_unit_size = 32;
3153 : 5 : bdev->split_on_write_unit = true;
3154 : 5 : g_io_done = false;
3155 : :
3156 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1);
3157 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512);
3158 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3159 : :
3160 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1);
3161 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512);
3162 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3163 : :
3164 : 5 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL);
3165 : 5 : CU_ASSERT(rc == 0);
3166 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
3167 : :
3168 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3169 : 5 : stub_complete_io(2);
3170 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
3171 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3172 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3173 : :
3174 : : /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split
3175 : : * based on write_unit_size, not optimal_io_boundary */
3176 : 5 : bdev->split_on_optimal_io_boundary = true;
3177 : 5 : bdev->optimal_io_boundary = 16;
3178 : 5 : g_io_done = false;
3179 : :
3180 : 5 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL);
3181 : 5 : CU_ASSERT(rc == 0);
3182 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
3183 : :
3184 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3185 : 5 : stub_complete_io(2);
3186 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
3187 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3188 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3189 : :
3190 : : /* Write I/O should fail if it is smaller than write_unit_size */
3191 : 5 : g_io_done = false;
3192 : :
3193 : 5 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL);
3194 : 5 : CU_ASSERT(rc == 0);
3195 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
3196 : :
3197 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3198 : 5 : poll_threads();
3199 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
3200 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3201 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3202 : :
3203 : : /* Same for I/O not aligned to write_unit_size */
3204 : 5 : g_io_done = false;
3205 : :
3206 : 5 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL);
3207 : 5 : CU_ASSERT(rc == 0);
3208 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
3209 : :
3210 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3211 : 5 : poll_threads();
3212 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
3213 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3214 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3215 : :
3216 : : /* Write should fail if it needs to be split but there are not enough iovs to submit
3217 : : * an entire write unit */
3218 : 5 : bdev->write_unit_size = SPDK_COUNTOF(iov) / 2;
3219 : 5 : g_io_done = false;
3220 : :
3221 [ + + ]: 645 : for (i = 0; i < SPDK_COUNTOF(iov); i++) {
3222 : 640 : iov[i].iov_base = (void *)(0x1000 + 512 * i);
3223 : 640 : iov[i].iov_len = 512;
3224 : : }
3225 : :
3226 : 5 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov),
3227 : : io_done, NULL);
3228 : 5 : CU_ASSERT(rc == 0);
3229 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
3230 : :
3231 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3232 : 5 : poll_threads();
3233 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
3234 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3235 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3236 : :
3237 : 5 : spdk_put_io_channel(io_ch);
3238 : 5 : spdk_bdev_close(desc);
3239 : 5 : free_bdev(bdev);
3240 : 5 : ut_fini_bdev();
3241 : 5 : }
3242 : :
3243 : : static void
3244 : 5 : bdev_io_alignment(void)
3245 : : {
3246 : : struct spdk_bdev *bdev;
3247 : 5 : struct spdk_bdev_desc *desc = NULL;
3248 : : struct spdk_io_channel *io_ch;
3249 : 5 : struct spdk_bdev_opts bdev_opts = {};
3250 : : int rc;
3251 : 5 : void *buf = NULL;
3252 : 4 : struct iovec iovs[2];
3253 : : int iovcnt;
3254 : : uint64_t alignment;
3255 : :
3256 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3257 : 5 : bdev_opts.bdev_io_pool_size = 20;
3258 : 5 : bdev_opts.bdev_io_cache_size = 2;
3259 : 5 : ut_init_bdev(&bdev_opts);
3260 : :
3261 : 5 : fn_table.submit_request = stub_submit_request_get_buf;
3262 : 5 : bdev = allocate_bdev("bdev0");
3263 : :
3264 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
3265 : 5 : CU_ASSERT(rc == 0);
3266 : 5 : CU_ASSERT(desc != NULL);
3267 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3268 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
3269 : 5 : CU_ASSERT(io_ch != NULL);
3270 : :
3271 : : /* Create aligned buffer */
3272 [ - + ]: 5 : rc = posix_memalign(&buf, 4096, 8192);
3273 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(rc == 0);
3274 : :
3275 : : /* Pass aligned single buffer with no alignment required */
3276 : 5 : alignment = 1;
3277 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3278 : :
3279 : 5 : rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
3280 : 5 : CU_ASSERT(rc == 0);
3281 : 5 : stub_complete_io(1);
3282 : 5 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3283 : : alignment));
3284 : :
3285 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
3286 : 5 : CU_ASSERT(rc == 0);
3287 : 5 : stub_complete_io(1);
3288 : 5 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3289 : : alignment));
3290 : :
3291 : : /* Pass unaligned single buffer with no alignment required */
3292 : 5 : alignment = 1;
3293 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3294 : :
3295 : 5 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3296 : 5 : CU_ASSERT(rc == 0);
3297 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3298 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
3299 : 5 : stub_complete_io(1);
3300 : :
3301 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3302 : 5 : CU_ASSERT(rc == 0);
3303 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3304 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
3305 : 5 : stub_complete_io(1);
3306 : :
3307 : : /* Pass unaligned single buffer with 512 alignment required */
3308 : 5 : alignment = 512;
3309 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3310 : :
3311 : 5 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3312 : 5 : CU_ASSERT(rc == 0);
3313 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
3314 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3315 : 5 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3316 : : alignment));
3317 : 5 : stub_complete_io(1);
3318 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3319 : :
3320 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3321 : 5 : CU_ASSERT(rc == 0);
3322 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
3323 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3324 : 5 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3325 : : alignment));
3326 : 5 : stub_complete_io(1);
3327 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3328 : :
3329 : : /* Pass unaligned single buffer with 4096 alignment required */
3330 : 5 : alignment = 4096;
3331 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3332 : :
3333 : 5 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
3334 : 5 : CU_ASSERT(rc == 0);
3335 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
3336 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3337 : 5 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3338 : : alignment));
3339 : 5 : stub_complete_io(1);
3340 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3341 : :
3342 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
3343 : 5 : CU_ASSERT(rc == 0);
3344 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
3345 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3346 : 5 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3347 : : alignment));
3348 : 5 : stub_complete_io(1);
3349 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3350 : :
3351 : : /* Pass aligned iovs with no alignment required */
3352 : 5 : alignment = 1;
3353 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3354 : :
3355 : 5 : iovcnt = 1;
3356 : 5 : iovs[0].iov_base = buf;
3357 : 5 : iovs[0].iov_len = 512;
3358 : :
3359 : 5 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3360 : 5 : CU_ASSERT(rc == 0);
3361 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3362 : 5 : stub_complete_io(1);
3363 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3364 : :
3365 : 5 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3366 : 5 : CU_ASSERT(rc == 0);
3367 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3368 : 5 : stub_complete_io(1);
3369 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3370 : :
3371 : : /* Pass unaligned iovs with no alignment required */
3372 : 5 : alignment = 1;
3373 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3374 : :
3375 : 5 : iovcnt = 2;
3376 : 5 : iovs[0].iov_base = buf + 16;
3377 : 5 : iovs[0].iov_len = 256;
3378 : 5 : iovs[1].iov_base = buf + 16 + 256 + 32;
3379 : 5 : iovs[1].iov_len = 256;
3380 : :
3381 : 5 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3382 : 5 : CU_ASSERT(rc == 0);
3383 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3384 : 5 : stub_complete_io(1);
3385 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3386 : :
3387 : 5 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3388 : 5 : CU_ASSERT(rc == 0);
3389 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3390 : 5 : stub_complete_io(1);
3391 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3392 : :
3393 : : /* Pass unaligned iov with 2048 alignment required */
3394 : 5 : alignment = 2048;
3395 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3396 : :
3397 : 5 : iovcnt = 2;
3398 : 5 : iovs[0].iov_base = buf + 16;
3399 : 5 : iovs[0].iov_len = 256;
3400 : 5 : iovs[1].iov_base = buf + 16 + 256 + 32;
3401 : 5 : iovs[1].iov_len = 256;
3402 : :
3403 : 5 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3404 : 5 : CU_ASSERT(rc == 0);
3405 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
3406 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3407 : 5 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3408 : : alignment));
3409 : 5 : stub_complete_io(1);
3410 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3411 : :
3412 : 5 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3413 : 5 : CU_ASSERT(rc == 0);
3414 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
3415 : 5 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
3416 : 5 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3417 : : alignment));
3418 : 5 : stub_complete_io(1);
3419 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3420 : :
3421 : : /* Pass iov without allocated buffer without alignment required */
3422 : 5 : alignment = 1;
3423 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3424 : :
3425 : 5 : iovcnt = 1;
3426 : 5 : iovs[0].iov_base = NULL;
3427 : 5 : iovs[0].iov_len = 0;
3428 : :
3429 : 5 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3430 : 5 : CU_ASSERT(rc == 0);
3431 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3432 : 5 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3433 : : alignment));
3434 : 5 : stub_complete_io(1);
3435 : :
3436 : : /* Pass iov without allocated buffer with 1024 alignment required */
3437 : 5 : alignment = 1024;
3438 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3439 : :
3440 : 5 : iovcnt = 1;
3441 : 5 : iovs[0].iov_base = NULL;
3442 : 5 : iovs[0].iov_len = 0;
3443 : :
3444 : 5 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3445 : 5 : CU_ASSERT(rc == 0);
3446 : 5 : CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
3447 : 5 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3448 : : alignment));
3449 : 5 : stub_complete_io(1);
3450 : :
3451 : 5 : spdk_put_io_channel(io_ch);
3452 : 5 : spdk_bdev_close(desc);
3453 : 5 : free_bdev(bdev);
3454 : 5 : fn_table.submit_request = stub_submit_request;
3455 : 5 : ut_fini_bdev();
3456 : :
3457 : 5 : free(buf);
3458 : 5 : }
3459 : :
3460 : : static void
3461 : 5 : bdev_io_alignment_with_boundary(void)
3462 : : {
3463 : : struct spdk_bdev *bdev;
3464 : 5 : struct spdk_bdev_desc *desc = NULL;
3465 : : struct spdk_io_channel *io_ch;
3466 : 5 : struct spdk_bdev_opts bdev_opts = {};
3467 : : int rc;
3468 : 5 : void *buf = NULL;
3469 : 4 : struct iovec iovs[2];
3470 : : int iovcnt;
3471 : : uint64_t alignment;
3472 : :
3473 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3474 : 5 : bdev_opts.bdev_io_pool_size = 20;
3475 : 5 : bdev_opts.bdev_io_cache_size = 2;
3476 : 5 : bdev_opts.opts_size = sizeof(bdev_opts);
3477 : 5 : ut_init_bdev(&bdev_opts);
3478 : :
3479 : 5 : fn_table.submit_request = stub_submit_request_get_buf;
3480 : 5 : bdev = allocate_bdev("bdev0");
3481 : :
3482 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
3483 : 5 : CU_ASSERT(rc == 0);
3484 : 5 : CU_ASSERT(desc != NULL);
3485 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3486 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
3487 : 5 : CU_ASSERT(io_ch != NULL);
3488 : :
3489 : : /* Create aligned buffer */
3490 [ - + ]: 5 : rc = posix_memalign(&buf, 4096, 131072);
3491 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(rc == 0);
3492 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3493 : :
3494 : : /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
3495 : 5 : alignment = 512;
3496 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3497 : 5 : bdev->optimal_io_boundary = 2;
3498 : 5 : bdev->split_on_optimal_io_boundary = true;
3499 : :
3500 : 5 : iovcnt = 1;
3501 : 5 : iovs[0].iov_base = NULL;
3502 : 5 : iovs[0].iov_len = 512 * 3;
3503 : :
3504 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3505 : 5 : CU_ASSERT(rc == 0);
3506 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3507 : 5 : stub_complete_io(2);
3508 : :
3509 : : /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
3510 : 5 : alignment = 512;
3511 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3512 : 5 : bdev->optimal_io_boundary = 16;
3513 : 5 : bdev->split_on_optimal_io_boundary = true;
3514 : :
3515 : 5 : iovcnt = 1;
3516 : 5 : iovs[0].iov_base = NULL;
3517 : 5 : iovs[0].iov_len = 512 * 16;
3518 : :
3519 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
3520 : 5 : CU_ASSERT(rc == 0);
3521 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3522 : 5 : stub_complete_io(2);
3523 : :
3524 : : /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */
3525 : 5 : alignment = 512;
3526 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3527 : 5 : bdev->optimal_io_boundary = 128;
3528 : 5 : bdev->split_on_optimal_io_boundary = true;
3529 : :
3530 : 5 : iovcnt = 1;
3531 : 5 : iovs[0].iov_base = buf + 16;
3532 : 5 : iovs[0].iov_len = 512 * 160;
3533 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
3534 : 5 : CU_ASSERT(rc == 0);
3535 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3536 : 5 : stub_complete_io(2);
3537 : :
3538 : : /* 512 * 3 with 2 IO boundary */
3539 : 5 : alignment = 512;
3540 : 5 : bdev->required_alignment = spdk_u32log2(alignment);
3541 : 5 : bdev->optimal_io_boundary = 2;
3542 : 5 : bdev->split_on_optimal_io_boundary = true;
3543 : :
3544 : 5 : iovcnt = 2;
3545 : 5 : iovs[0].iov_base = buf + 16;
3546 : 5 : iovs[0].iov_len = 512;
3547 : 5 : iovs[1].iov_base = buf + 16 + 512 + 32;
3548 : 5 : iovs[1].iov_len = 1024;
3549 : :
3550 : 5 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3551 : 5 : CU_ASSERT(rc == 0);
3552 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3553 : 5 : stub_complete_io(2);
3554 : :
3555 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3556 : 5 : CU_ASSERT(rc == 0);
3557 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3558 : 5 : stub_complete_io(2);
3559 : :
3560 : : /* 512 * 64 with 32 IO boundary */
3561 : 5 : bdev->optimal_io_boundary = 32;
3562 : 5 : iovcnt = 2;
3563 : 5 : iovs[0].iov_base = buf + 16;
3564 : 5 : iovs[0].iov_len = 16384;
3565 : 5 : iovs[1].iov_base = buf + 16 + 16384 + 32;
3566 : 5 : iovs[1].iov_len = 16384;
3567 : :
3568 : 5 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
3569 : 5 : CU_ASSERT(rc == 0);
3570 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
3571 : 5 : stub_complete_io(3);
3572 : :
3573 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
3574 : 5 : CU_ASSERT(rc == 0);
3575 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
3576 : 5 : stub_complete_io(3);
3577 : :
3578 : : /* 512 * 160 with 32 IO boundary */
3579 : 5 : iovcnt = 1;
3580 : 5 : iovs[0].iov_base = buf + 16;
3581 : 5 : iovs[0].iov_len = 16384 + 65536;
3582 : :
3583 : 5 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
3584 : 5 : CU_ASSERT(rc == 0);
3585 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
3586 : 5 : stub_complete_io(6);
3587 : :
3588 : 5 : spdk_put_io_channel(io_ch);
3589 : 5 : spdk_bdev_close(desc);
3590 : 5 : free_bdev(bdev);
3591 : 5 : fn_table.submit_request = stub_submit_request;
3592 : 5 : ut_fini_bdev();
3593 : :
3594 : 5 : free(buf);
3595 : 5 : }
3596 : :
3597 : : static void
3598 : 10 : histogram_status_cb(void *cb_arg, int status)
3599 : : {
3600 : 10 : g_status = status;
3601 : 10 : }
3602 : :
3603 : : static void
3604 : 15 : histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
3605 : : {
3606 : 15 : g_status = status;
3607 : 15 : g_histogram = histogram;
3608 : 15 : }
3609 : :
3610 : : static void
3611 : 111360 : histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
3612 : : uint64_t total, uint64_t so_far)
3613 : : {
3614 : 111360 : g_count += count;
3615 : 111360 : }
3616 : :
3617 : : static void
3618 : 10 : histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
3619 : : {
3620 : 10 : spdk_histogram_data_fn cb_fn = cb_arg;
3621 : :
3622 : 10 : g_status = status;
3623 : :
3624 [ + + ]: 10 : if (status == 0) {
3625 : 5 : spdk_histogram_data_iterate(histogram, cb_fn, NULL);
3626 : : }
3627 : 10 : }
3628 : :
3629 : : static void
3630 : 5 : bdev_histograms(void)
3631 : : {
3632 : : struct spdk_bdev *bdev;
3633 : 5 : struct spdk_bdev_desc *desc = NULL;
3634 : : struct spdk_io_channel *ch;
3635 : : struct spdk_histogram_data *histogram;
3636 : 4 : uint8_t buf[4096];
3637 : : int rc;
3638 : :
3639 : 5 : ut_init_bdev(NULL);
3640 : :
3641 : 5 : bdev = allocate_bdev("bdev");
3642 : :
3643 : 5 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
3644 : 5 : CU_ASSERT(rc == 0);
3645 : 5 : CU_ASSERT(desc != NULL);
3646 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3647 : :
3648 : 5 : ch = spdk_bdev_get_io_channel(desc);
3649 : 5 : CU_ASSERT(ch != NULL);
3650 : :
3651 : : /* Enable histogram */
3652 : 5 : g_status = -1;
3653 : 5 : spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
3654 : 5 : poll_threads();
3655 : 5 : CU_ASSERT(g_status == 0);
3656 [ - + ]: 5 : CU_ASSERT(bdev->internal.histogram_enabled == true);
3657 : :
3658 : : /* Allocate histogram */
3659 : 5 : histogram = spdk_histogram_data_alloc();
3660 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(histogram != NULL);
3661 : :
3662 : : /* Check if histogram is zeroed */
3663 : 5 : spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
3664 : 5 : poll_threads();
3665 : 5 : CU_ASSERT(g_status == 0);
3666 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
3667 : :
3668 : 5 : g_count = 0;
3669 : 5 : spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
3670 : :
3671 : 5 : CU_ASSERT(g_count == 0);
3672 : :
3673 : 5 : rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL);
3674 : 5 : CU_ASSERT(rc == 0);
3675 : :
3676 : 5 : spdk_delay_us(10);
3677 : 5 : stub_complete_io(1);
3678 : 5 : poll_threads();
3679 : :
3680 : 5 : rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL);
3681 : 5 : CU_ASSERT(rc == 0);
3682 : :
3683 : 5 : spdk_delay_us(10);
3684 : 5 : stub_complete_io(1);
3685 : 5 : poll_threads();
3686 : :
3687 : : /* Check if histogram gathered data from all I/O channels */
3688 : 5 : g_histogram = NULL;
3689 : 5 : spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
3690 : 5 : poll_threads();
3691 : 5 : CU_ASSERT(g_status == 0);
3692 [ - + ]: 5 : CU_ASSERT(bdev->internal.histogram_enabled == true);
3693 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
3694 : :
3695 : 5 : g_count = 0;
3696 : 5 : spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
3697 : 5 : CU_ASSERT(g_count == 2);
3698 : :
3699 : 5 : g_count = 0;
3700 : 5 : spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count);
3701 : 5 : CU_ASSERT(g_status == 0);
3702 : 5 : CU_ASSERT(g_count == 2);
3703 : :
3704 : : /* Disable histogram */
3705 : 5 : spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
3706 : 5 : poll_threads();
3707 : 5 : CU_ASSERT(g_status == 0);
3708 [ - + ]: 5 : CU_ASSERT(bdev->internal.histogram_enabled == false);
3709 : :
3710 : : /* Try to run histogram commands on disabled bdev */
3711 : 5 : spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
3712 : 5 : poll_threads();
3713 : 5 : CU_ASSERT(g_status == -EFAULT);
3714 : :
3715 : 5 : spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL);
3716 : 5 : CU_ASSERT(g_status == -EFAULT);
3717 : :
3718 : 5 : spdk_histogram_data_free(histogram);
3719 : 5 : spdk_put_io_channel(ch);
3720 : 5 : spdk_bdev_close(desc);
3721 : 5 : free_bdev(bdev);
3722 : 5 : ut_fini_bdev();
3723 : 5 : }
3724 : :
/*
 * Common body for the compare tests. Exercises spdk_bdev_comparev_blocks()
 * and spdk_bdev_compare_blocks() in both the success and miscompare cases.
 *
 * \param emulated When true, COMPARE support is disabled on the stub bdev so
 * the bdev layer must emulate compare via READ (the expected I/O type seen by
 * the stub is READ); when false, the native COMPARE I/O is expected.
 */
static void
_bdev_compare(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	/* Emulated compare is implemented as a READ followed by a memcmp in the
	 * bdev layer, so that is the I/O type the stub backend should observe.
	 */
	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));

	/* Advertise (or hide) native COMPARE support on the stub bdev. */
	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	/* NOTE(review): submit_request was already set to
	 * stub_submit_request_get_buf above; this reassignment looks redundant —
	 * confirm nothing in between resets fn_table before removing it.
	 */
	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);

	/* 1. successful comparev: backing data matches the compare buffer */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare comparev: backing data (0xbb) differs from compare buffer (0xaa) */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* 3. successful compare (single-buffer variant) */
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 4. miscompare compare (single-buffer variant) */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* Tear down and restore the global stub state for the next test. */
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}
3837 : :
/*
 * Common body for the compare-with-metadata tests. Covers both interleaved
 * metadata (md embedded in each block) and separate metadata buffers, with
 * success and miscompare cases for each layout, including a miscompare caused
 * solely by differing metadata.
 *
 * \param emulated When true, COMPARE support is disabled on the stub bdev so
 * compares are emulated via READ; when false, native COMPARE is expected.
 */
static void
_bdev_compare_with_md(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_miscompare[1024 /* 2 * blocklen */];
	char md_buf[16];
	char md_buf_miscompare[16];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	/* Emulated compare is carried out as a READ by the bdev layer. */
	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(buf, 0xaa, sizeof(buf));
	memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare));
	/* make last md different */
	memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8);
	memset(buf_miscompare, 0xbb, sizeof(buf_miscompare));
	memset(md_buf, 0xaa, 16);
	memset(md_buf_miscompare, 0xbb, 16);

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	/* NOTE(review): submit_request was already assigned above; this repeat
	 * looks redundant — verify before cleaning it up.
	 */
	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 2;

	/* interleaved md & data: each 520-byte block is 512B data + 8B md */
	bdev->md_interleave = true;
	bdev->md_len = 8;
	bdev->blocklen = 512 + 8;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = sizeof(buf);

	/* 1. successful compare with md interleaved */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = sizeof(buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare with md interleaved: only the last md region differs */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_interleaved_miscompare;
	g_compare_read_buf_len = sizeof(buf_interleaved_miscompare);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* Separate data & md buffers: 512B blocks plus a standalone md buffer */
	bdev->md_interleave = false;
	bdev->blocklen = 512;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = 1024;

	/* 3. successful compare with md separated */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 4. miscompare with md separated where md buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_compare_md_buf = md_buf_miscompare;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* 5. miscompare with md separated where buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_miscompare;
	g_compare_read_buf_len = sizeof(buf_miscompare);
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* Tear down and restore global stub state for subsequent tests. */
	bdev->md_len = 0;
	g_compare_md_buf = NULL;

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}
3992 : :
/* Run the compare tests with native COMPARE support enabled on the bdev
 * (emulated == false): plain compare, then compare with metadata.
 */
static void
bdev_compare(void)
{
	_bdev_compare(false);
	_bdev_compare_with_md(false);
}
3999 : :
/* Run the compare tests with COMPARE support disabled on the bdev
 * (emulated == true), so the bdev layer emulates compare via READ.
 */
static void
bdev_compare_emulated(void)
{
	_bdev_compare(true);
	_bdev_compare_with_md(true);
}
4006 : :
/*
 * Verify the emulated compare-and-write path
 * (spdk_bdev_comparev_and_writev_blocks) on a bdev without native COMPARE
 * support: the operation is carried out as a READ (compare) followed by a
 * WRITE, with the LBA range locked for the duration. Both the success case
 * and the miscompare case (where the WRITE must never be issued) are checked.
 */
static void
bdev_compare_and_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	char cc_buf[512];
	char write_buf[512];
	struct iovec compare_iov;
	struct iovec write_iov;
	int rc;

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));
	memset(cc_buf, 0xcc, sizeof(cc_buf));

	/* Force the emulated path: no native COMPARE support. */
	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);
	write_iov.iov_base = bb_buf;
	write_iov.iov_len = sizeof(bb_buf);

	/* Success case: expect a READ (the compare) followed by the WRITE. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	/* Only the compare half finished; the overall operation is not done yet. */
	CU_ASSERT(g_io_done == false);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);

	/* Test miscompare: backing data (0xcc) differs, so only the READ is issued */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = cc_buf;
	g_compare_read_buf_len = sizeof(cc_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking earlier because we expect error here */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
	/* The WRITE must not have been submitted after a miscompare. */
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 0);

	/* Tear down and restore global stub state. */
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
	g_compare_write_buf = NULL;
}
4113 : :
4114 : : static void
4115 : 5 : bdev_write_zeroes(void)
4116 : : {
4117 : : struct spdk_bdev *bdev;
4118 : 5 : struct spdk_bdev_desc *desc = NULL;
4119 : : struct spdk_io_channel *ioch;
4120 : : struct ut_expected_io *expected_io;
4121 : : uint64_t offset, num_io_blocks, num_blocks;
4122 : : uint32_t num_completed, num_requests;
4123 : : int rc;
4124 : :
4125 : 5 : ut_init_bdev(NULL);
4126 : 5 : bdev = allocate_bdev("bdev");
4127 : :
4128 : 5 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
4129 : 5 : CU_ASSERT_EQUAL(rc, 0);
4130 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
4131 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4132 : 5 : ioch = spdk_bdev_get_io_channel(desc);
4133 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
4134 : :
4135 : 5 : fn_table.submit_request = stub_submit_request;
4136 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
4137 : :
4138 : : /* First test that if the bdev supports write_zeroes, the request won't be split */
4139 : 5 : bdev->md_len = 0;
4140 : 5 : bdev->blocklen = 4096;
4141 [ - + ]: 5 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
4142 : :
4143 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
4144 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4145 : 5 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4146 : 5 : CU_ASSERT_EQUAL(rc, 0);
4147 : 5 : num_completed = stub_complete_io(1);
4148 : 5 : CU_ASSERT_EQUAL(num_completed, 1);
4149 : :
4150 : : /* Check that if write zeroes is not supported it'll be replaced by regular writes */
4151 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
4152 : 5 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4153 [ - + ]: 5 : num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
4154 : 5 : num_requests = 2;
4155 [ - + ]: 5 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;
4156 : :
4157 [ + + ]: 15 : for (offset = 0; offset < num_requests; ++offset) {
4158 : 10 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4159 : : offset * num_io_blocks, num_io_blocks, 0);
4160 : 10 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4161 : : }
4162 : :
4163 : 5 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4164 : 5 : CU_ASSERT_EQUAL(rc, 0);
4165 : 5 : num_completed = stub_complete_io(num_requests);
4166 : 5 : CU_ASSERT_EQUAL(num_completed, num_requests);
4167 : :
4168 : : /* Check that the splitting is correct if bdev has interleaved metadata */
4169 : 5 : bdev->md_interleave = true;
4170 : 5 : bdev->md_len = 64;
4171 : 5 : bdev->blocklen = 4096 + 64;
4172 : 5 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4173 [ - + ]: 5 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
4174 : :
4175 : 5 : num_requests = offset = 0;
4176 [ + + ]: 15 : while (offset < num_blocks) {
4177 [ - + + + : 10 : num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
- + ]
4178 : 10 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4179 : : offset, num_io_blocks, 0);
4180 : 10 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4181 : 10 : offset += num_io_blocks;
4182 : 10 : num_requests++;
4183 : : }
4184 : :
4185 : 5 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4186 : 5 : CU_ASSERT_EQUAL(rc, 0);
4187 : 5 : num_completed = stub_complete_io(num_requests);
4188 : 5 : CU_ASSERT_EQUAL(num_completed, num_requests);
4189 : 5 : num_completed = stub_complete_io(num_requests);
4190 [ - + ]: 5 : assert(num_completed == 0);
4191 : :
4192 : : /* Check the the same for separate metadata buffer */
4193 : 5 : bdev->md_interleave = false;
4194 : 5 : bdev->md_len = 64;
4195 : 5 : bdev->blocklen = 4096;
4196 : 5 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4197 : :
4198 : 5 : num_requests = offset = 0;
4199 [ + + ]: 15 : while (offset < num_blocks) {
4200 [ - + + - : 10 : num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
- + ]
4201 : 10 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4202 : : offset, num_io_blocks, 0);
4203 : 10 : expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
4204 : 10 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4205 : 10 : offset += num_io_blocks;
4206 : 10 : num_requests++;
4207 : : }
4208 : :
4209 : 5 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4210 : 5 : CU_ASSERT_EQUAL(rc, 0);
4211 : 5 : num_completed = stub_complete_io(num_requests);
4212 : 5 : CU_ASSERT_EQUAL(num_completed, num_requests);
4213 : :
4214 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
4215 : 5 : spdk_put_io_channel(ioch);
4216 : 5 : spdk_bdev_close(desc);
4217 : 5 : free_bdev(bdev);
4218 : 5 : ut_fini_bdev();
4219 : 5 : }
4220 : :
/*
 * Verify the zero-copy write protocol: spdk_bdev_zcopy_start(populate=false)
 * must hand back the module-provided write buffer in the caller's iov and
 * save the bdev_io, and spdk_bdev_zcopy_end(commit=true) must complete the
 * write. Sentinel values in the *read* buffer globals verify the read path
 * is never touched.
 */
static void
bdev_zcopy_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = false;
	const bool commit = true;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	/* Poison the read-path globals; they must be untouched at the end. */
	g_zcopy_read_buf = (void *) 0x1122334455667788UL;
	g_zcopy_read_buf_len = (uint32_t) -1;
	/* Do a zcopy start for a write (populate=false) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_write_buf = aa_buf;
	g_zcopy_write_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_write_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);
	/* Now do the zcopy end for a write (commit=true) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check the g_zcopy are reset by io_done */
	CU_ASSERT(g_zcopy_write_buf == NULL);
	CU_ASSERT(g_zcopy_write_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check the zcopy read buffer has not been touched which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4302 : :
/*
 * Verify the zero-copy read protocol: spdk_bdev_zcopy_start(populate=true)
 * must hand back the module-provided read buffer in the caller's iov and
 * save the bdev_io, and spdk_bdev_zcopy_end(commit=false) must release it.
 * Sentinel values in the *write* buffer globals verify the write path is
 * never touched.
 */
static void
bdev_zcopy_read(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = true;
	const bool commit = false;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	/* Poison the write-path globals; they must be untouched at the end. */
	g_zcopy_write_buf = (void *) 0x1122334455667788UL;
	g_zcopy_write_buf_len = (uint32_t) -1;

	/* Do a zcopy start for a read (populate=true) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_read_buf = aa_buf;
	g_zcopy_read_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_read_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);

	/* Now do the zcopy end for a read (commit=false) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check the g_zcopy are reset by io_done */
	CU_ASSERT(g_zcopy_read_buf == NULL);
	CU_ASSERT(g_zcopy_read_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check the zcopy write buffer has not been touched which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4386 : :
4387 : : static void
4388 : 5 : bdev_open_while_hotremove(void)
4389 : : {
4390 : : struct spdk_bdev *bdev;
4391 : 5 : struct spdk_bdev_desc *desc[2] = {};
4392 : : int rc;
4393 : :
4394 : 5 : bdev = allocate_bdev("bdev");
4395 : :
4396 : 5 : rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
4397 : 5 : CU_ASSERT(rc == 0);
4398 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
4399 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));
4400 : :
4401 : 5 : spdk_bdev_unregister(bdev, NULL, NULL);
4402 : : /* Bdev unregister is handled asynchronously. Poll thread to complete. */
4403 : 5 : poll_threads();
4404 : :
4405 : 5 : rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
4406 : 5 : CU_ASSERT(rc == -ENODEV);
4407 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc[1] == NULL);
4408 : :
4409 : 5 : spdk_bdev_close(desc[0]);
4410 : 5 : free_bdev(bdev);
4411 : 5 : }
4412 : :
4413 : : static void
4414 : 5 : bdev_close_while_hotremove(void)
4415 : : {
4416 : : struct spdk_bdev *bdev;
4417 : 5 : struct spdk_bdev_desc *desc = NULL;
4418 : 5 : int rc = 0;
4419 : :
4420 : 5 : bdev = allocate_bdev("bdev");
4421 : :
4422 : 5 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
4423 : 5 : CU_ASSERT_EQUAL(rc, 0);
4424 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
4425 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4426 : :
4427 : : /* Simulate hot-unplug by unregistering bdev */
4428 : 5 : g_event_type1 = 0xFF;
4429 : 5 : g_unregister_arg = NULL;
4430 : 5 : g_unregister_rc = -1;
4431 : 5 : spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
4432 : : /* Close device while remove event is in flight */
4433 : 5 : spdk_bdev_close(desc);
4434 : :
4435 : : /* Ensure that unregister callback is delayed */
4436 : 5 : CU_ASSERT_EQUAL(g_unregister_arg, NULL);
4437 : 5 : CU_ASSERT_EQUAL(g_unregister_rc, -1);
4438 : :
4439 : 5 : poll_threads();
4440 : :
4441 : : /* Event callback shall not be issued because device was closed */
4442 : 5 : CU_ASSERT_EQUAL(g_event_type1, 0xFF);
4443 : : /* Unregister callback is issued */
4444 : 5 : CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
4445 : 5 : CU_ASSERT_EQUAL(g_unregister_rc, 0);
4446 : :
4447 : 5 : free_bdev(bdev);
4448 : 5 : }
4449 : :
/* Verify spdk_bdev_open_ext(): a NULL event callback is rejected with
 * -EINVAL, and when the bdev is unregistered every open descriptor's
 * event callback receives SPDK_BDEV_EVENT_REMOVE.
 */
static void
bdev_open_ext_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	/* Opening without an event callback must fail. */
	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}
4483 : :
/* Verify that an asynchronous unregister delivers SPDK_BDEV_EVENT_REMOVE to
 * all open descriptors, and that the unregister completion callback is
 * deferred until the last descriptor is closed.
 */
static void
bdev_open_ext_unregister(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	struct spdk_bdev_desc *desc3 = NULL;
	struct spdk_bdev_desc *desc4 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;
	g_event_type3 = 0xFF;
	g_event_type4 = 0xFF;

	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);

	/*
	 * Unregister is handled asynchronously and event callback
	 * (i.e., above bdev_open_cbN) will be called.
	 * For bdev_open_cb3 and bdev_open_cb4, it is intended to not
	 * close the desc3 and desc4 so that the bdev is not closed.
	 */
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE);

	/* Check that unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	/*
	 * Explicitly close desc3. As desc4 is still opened there, the
	 * unregister callback is still delayed to execute.
	 */
	spdk_bdev_close(desc3);
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	/*
	 * Explicitly close desc4 to trigger the ongoing bdev unregister
	 * operation after last desc is closed.
	 */
	spdk_bdev_close(desc4);

	/* Poll the thread for the async unregister operation */
	poll_threads();

	/* Check that unregister callback is executed */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
	poll_threads();
}
4564 : :
/* Captures details of the I/O reported by the timeout callback so the
 * test can verify which submitted I/O actually timed out.
 */
struct timeout_io_cb_arg {
	struct iovec iov;	/* copy of the timed-out I/O's iovec */
	uint8_t type;		/* SPDK_BDEV_IO_TYPE_* of the timed-out I/O */
};
4569 : :
4570 : : static int
4571 : 70 : bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
4572 : : {
4573 : : struct spdk_bdev_io *bdev_io;
4574 : 70 : int n = 0;
4575 : :
4576 [ - + ]: 70 : if (!ch) {
4577 : 0 : return -1;
4578 : : }
4579 : :
4580 [ + + ]: 145 : TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
4581 : 75 : n++;
4582 : : }
4583 : :
4584 : 70 : return n;
4585 : : }
4586 : :
4587 : : static void
4588 : 15 : bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
4589 : : {
4590 : 15 : struct timeout_io_cb_arg *ctx = cb_arg;
4591 : :
4592 : 15 : ctx->type = bdev_io->type;
4593 : 15 : ctx->iov.iov_base = bdev_io->iov.iov_base;
4594 : 15 : ctx->iov.iov_len = bdev_io->iov.iov_len;
4595 : 15 : }
4596 : :
/* Exercise spdk_bdev_set_timeout(): verify the io_submitted bookkeeping,
 * the per-descriptor timeout poller lifecycle (set/change/disable), and
 * that timed-out write, split and reset I/Os are reported to the callback
 * with the correct type and payload.
 */
static void
bdev_set_io_timeout(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* This is part 1.
	 * We will check the bdev_ch->io_submitted list
	 * to make sure that it can link IOs and only the user submitted IOs
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 * Child - Offset 14, length 2, payload 0xF000
	 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
	/* We count all submitted IOs including IO that are generated by splitting. */
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset IO */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is part 2
	 * Test the desc timeout poller register
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* This is part 3
	 * We will test to catch timeout IO and check whether the IO is
	 * the submitted one.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* Don't reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30 reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split IO above and check the IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child complete in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4738 : :
/* Exercise spdk_bdev_set_qd_sampling_period(): verify the queue-depth
 * poller lifecycle (set, change, change while a poll is in progress,
 * disable) and that QD sampling coexists with submitted I/O and reset.
 */
static void
bdev_set_qd_sampling(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* This is part 1.
	 * We will check the bdev_ch->io_submitted list
	 * to make sure that it can link IOs and only the user submitted IOs
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is part 2.
	 * Test the bdev's qd poller register
	 */
	/* 1st Successfully set the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 10);
	CU_ASSERT(bdev->internal.new_period == 10);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);

	/* 2nd Change the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 20);
	CU_ASSERT(bdev->internal.new_period == 20);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 3rd Change the qd sampling period and verify qd_poll_in_progress */
	spdk_delay_us(20);
	poll_thread_times(0, 1);
	CU_ASSERT(bdev->internal.qd_poll_in_progress == true);
	spdk_bdev_set_qd_sampling_period(bdev, 30);
	CU_ASSERT(bdev->internal.new_period == 30);
	CU_ASSERT(bdev->internal.period == 20);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poll_in_progress == false);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 4th Disable the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 0);
	CU_ASSERT(bdev->internal.new_period == 0);
	CU_ASSERT(bdev->internal.period == 30);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller == NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);
	CU_ASSERT(bdev->internal.qd_desc == NULL);

	/* This is part 3.
	 * We will test the submitted IO and reset works
	 * properly with the qd sampling.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	spdk_bdev_set_qd_sampling_period(bdev, 1);
	poll_threads();

	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	poll_threads();

	/* Close the desc */
	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);

	/* Complete the submitted IO and reset */
	stub_complete_io(2);
	poll_threads();

	free_bdev(bdev);
	ut_fini_bdev();
}
4841 : :
4842 : : static void
4843 : 5 : lba_range_overlap(void)
4844 : : {
4845 : 4 : struct lba_range r1, r2;
4846 : :
4847 : 5 : r1.offset = 100;
4848 : 5 : r1.length = 50;
4849 : :
4850 : 5 : r2.offset = 0;
4851 : 5 : r2.length = 1;
4852 : 5 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4853 : :
4854 : 5 : r2.offset = 0;
4855 : 5 : r2.length = 100;
4856 : 5 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4857 : :
4858 : 5 : r2.offset = 0;
4859 : 5 : r2.length = 110;
4860 : 5 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4861 : :
4862 : 5 : r2.offset = 100;
4863 : 5 : r2.length = 10;
4864 : 5 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4865 : :
4866 : 5 : r2.offset = 110;
4867 : 5 : r2.length = 20;
4868 : 5 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4869 : :
4870 : 5 : r2.offset = 140;
4871 : 5 : r2.length = 150;
4872 : 5 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4873 : :
4874 : 5 : r2.offset = 130;
4875 : 5 : r2.length = 200;
4876 : 5 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4877 : :
4878 : 5 : r2.offset = 150;
4879 : 5 : r2.length = 100;
4880 : 5 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4881 : :
4882 : 5 : r2.offset = 110;
4883 : 5 : r2.length = 0;
4884 : 5 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4885 : 5 : }
4886 : :
/* Set by the lock/unlock completion callbacks below so the tests can
 * observe when an LBA range lock or unlock has completed.
 */
static bool g_lock_lba_range_done;
static bool g_unlock_lba_range_done;
4889 : :
/* Completion callback for bdev_lock_lba_range(); flags that the lock
 * finished.
 */
static void
lock_lba_range_done(struct lba_range *range, void *ctx, int status)
{
	g_lock_lba_range_done = true;
}
4895 : :
/* Completion callback for bdev_unlock_lba_range(); flags that the unlock
 * finished.
 */
static void
unlock_lba_range_done(struct lba_range *range, void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}
4901 : :
/* Verify basic LBA range locking: a lock appears on the channel's
 * locked_ranges list with the expected offset/length/owner, a mismatched
 * unlock is rejected with -EINVAL, and an exact unlock removes the range.
 */
static void
lock_lba_range_check_ranges(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);
	CU_ASSERT(range->owner_ch == channel);

	/* Unlocks must exactly match a lock. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_unlock_lba_range_done == false);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	spdk_delay_us(100);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4955 : :
4956 : : static void
4957 : 5 : lock_lba_range_with_io_outstanding(void)
4958 : : {
4959 : : struct spdk_bdev *bdev;
4960 : 5 : struct spdk_bdev_desc *desc = NULL;
4961 : : struct spdk_io_channel *io_ch;
4962 : : struct spdk_bdev_channel *channel;
4963 : : struct lba_range *range;
4964 : 4 : char buf[4096];
4965 : 4 : int ctx1;
4966 : : int rc;
4967 : :
4968 : 5 : ut_init_bdev(NULL);
4969 : 5 : bdev = allocate_bdev("bdev0");
4970 : :
4971 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
4972 : 5 : CU_ASSERT(rc == 0);
4973 : 5 : CU_ASSERT(desc != NULL);
4974 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4975 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
4976 : 5 : CU_ASSERT(io_ch != NULL);
4977 : 5 : channel = spdk_io_channel_get_ctx(io_ch);
4978 : :
4979 : 5 : g_io_done = false;
4980 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
4981 : 5 : CU_ASSERT(rc == 0);
4982 : :
4983 : 5 : g_lock_lba_range_done = false;
4984 : 5 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
4985 : 5 : CU_ASSERT(rc == 0);
4986 : 5 : poll_threads();
4987 : :
4988 : : /* The lock should immediately become valid, since there are no outstanding
4989 : : * write I/O.
4990 : : */
4991 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
4992 [ - + ]: 5 : CU_ASSERT(g_lock_lba_range_done == true);
4993 : 5 : range = TAILQ_FIRST(&channel->locked_ranges);
4994 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(range != NULL);
4995 : 5 : CU_ASSERT(range->offset == 20);
4996 : 5 : CU_ASSERT(range->length == 10);
4997 : 5 : CU_ASSERT(range->owner_ch == channel);
4998 : 5 : CU_ASSERT(range->locked_ctx == &ctx1);
4999 : :
5000 : 5 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
5001 : 5 : CU_ASSERT(rc == 0);
5002 : 5 : stub_complete_io(1);
5003 : 5 : spdk_delay_us(100);
5004 : 5 : poll_threads();
5005 : :
5006 : 5 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5007 : :
5008 : : /* Now try again, but with a write I/O. */
5009 : 5 : g_io_done = false;
5010 : 5 : rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
5011 : 5 : CU_ASSERT(rc == 0);
5012 : :
5013 : 5 : g_lock_lba_range_done = false;
5014 : 5 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
5015 : 5 : CU_ASSERT(rc == 0);
5016 : 5 : poll_threads();
5017 : :
5018 : : /* The lock should not be fully valid yet, since a write I/O is outstanding.
5019 : : * But note that the range should be on the channel's locked_list, to make sure no
5020 : : * new write I/O are started.
5021 : : */
5022 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5023 [ - + ]: 5 : CU_ASSERT(g_lock_lba_range_done == false);
5024 : 5 : range = TAILQ_FIRST(&channel->locked_ranges);
5025 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(range != NULL);
5026 : 5 : CU_ASSERT(range->offset == 20);
5027 : 5 : CU_ASSERT(range->length == 10);
5028 : :
5029 : : /* Complete the write I/O. This should make the lock valid (checked by confirming
5030 : : * our callback was invoked).
5031 : : */
5032 : 5 : stub_complete_io(1);
5033 : 5 : spdk_delay_us(100);
5034 : 5 : poll_threads();
5035 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5036 [ - + ]: 5 : CU_ASSERT(g_lock_lba_range_done == true);
5037 : :
5038 : 5 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
5039 : 5 : CU_ASSERT(rc == 0);
5040 : 5 : poll_threads();
5041 : :
5042 : 5 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5043 : :
5044 : 5 : spdk_put_io_channel(io_ch);
5045 : 5 : spdk_bdev_close(desc);
5046 : 5 : free_bdev(bdev);
5047 : 5 : ut_fini_bdev();
5048 : 5 : }
5049 : :
/* Verify that overlapping LBA range locks are queued on the bdev's
 * pending_locked_ranges list and promoted to active locks only when all
 * conflicting locks have been released.
 */
static void
lock_lba_range_overlapped(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	/* Lock range 20-29. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);

	/* Try to lock range 25-39. It should not lock immediately, since it overlaps with
	 * 20-29.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Unlock 20-29. This should result in range 25-39 now getting locked since it
	 * no longer overlaps with an active lock.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Lock 40-59. This should immediately lock since it does not overlap with the
	 * currently active 25-39 lock.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	range = TAILQ_NEXT(range, tailq);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 40);
	CU_ASSERT(range->length == 20);

	/* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 25-39. Make sure that 35-44 is still in the pending list, since
	 * the 40-59 lock is still active.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 40-59. This should result in 35-44 now getting locked, since there are
	 * no longer any active overlapping locks.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Finally, unlock 35-44. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
5186 : :
/* Completion callback for spdk_bdev_quiesce(); flags that the quiesce
 * finished (reuses the lock flag since quiesce is built on range locks).
 */
static void
bdev_quiesce_done(void *ctx, int status)
{
	g_lock_lba_range_done = true;
}
5192 : :
/* Completion callback for spdk_bdev_unquiesce(); flags that the unquiesce
 * finished.
 */
static void
bdev_unquiesce_done(void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}
5198 : :
/* Quiesce completion callback that immediately issues an unquiesce from
 * within the callback itself, to verify that re-entrant unquiesce from the
 * quiesce completion path works. ctx carries the bdev being quiesced.
 */
static void
bdev_quiesce_done_unquiesce(void *ctx, int status)
{
	struct spdk_bdev *bdev = ctx;
	int rc;

	g_lock_lba_range_done = true;

	rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, NULL);
	CU_ASSERT(rc == 0);
}
5210 : :
5211 : : static void
5212 : 5 : bdev_quiesce(void)
5213 : : {
5214 : : struct spdk_bdev *bdev;
5215 : 5 : struct spdk_bdev_desc *desc = NULL;
5216 : : struct spdk_io_channel *io_ch;
5217 : : struct spdk_bdev_channel *channel;
5218 : : struct lba_range *range;
5219 : : struct spdk_bdev_io *bdev_io;
5220 : 4 : int ctx1;
5221 : : int rc;
5222 : :
5223 : 5 : ut_init_bdev(NULL);
5224 : 5 : bdev = allocate_bdev("bdev0");
5225 : :
5226 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5227 : 5 : CU_ASSERT(rc == 0);
5228 : 5 : CU_ASSERT(desc != NULL);
5229 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5230 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
5231 : 5 : CU_ASSERT(io_ch != NULL);
5232 : 5 : channel = spdk_io_channel_get_ctx(io_ch);
5233 : :
5234 : 5 : g_lock_lba_range_done = false;
5235 : 5 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1);
5236 : 5 : CU_ASSERT(rc == 0);
5237 : 5 : poll_threads();
5238 : :
5239 [ - + ]: 5 : CU_ASSERT(g_lock_lba_range_done == true);
5240 : 5 : range = TAILQ_FIRST(&channel->locked_ranges);
5241 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(range != NULL);
5242 : 5 : CU_ASSERT(range->offset == 0);
5243 : 5 : CU_ASSERT(range->length == bdev->blockcnt);
5244 : 5 : CU_ASSERT(range->owner_ch == NULL);
5245 : 5 : range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges);
5246 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(range != NULL);
5247 : 5 : CU_ASSERT(range->offset == 0);
5248 : 5 : CU_ASSERT(range->length == bdev->blockcnt);
5249 : 5 : CU_ASSERT(range->owner_ch == NULL);
5250 : :
5251 : 5 : g_unlock_lba_range_done = false;
5252 : 5 : rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1);
5253 : 5 : CU_ASSERT(rc == 0);
5254 : 5 : spdk_delay_us(100);
5255 : 5 : poll_threads();
5256 : :
5257 [ - + ]: 5 : CU_ASSERT(g_unlock_lba_range_done == true);
5258 : 5 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5259 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5260 : :
5261 : 5 : g_lock_lba_range_done = false;
5262 : 5 : rc = spdk_bdev_quiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_quiesce_done, &ctx1);
5263 : 5 : CU_ASSERT(rc == 0);
5264 : 5 : poll_threads();
5265 : :
5266 [ - + ]: 5 : CU_ASSERT(g_lock_lba_range_done == true);
5267 : 5 : range = TAILQ_FIRST(&channel->locked_ranges);
5268 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(range != NULL);
5269 : 5 : CU_ASSERT(range->offset == 20);
5270 : 5 : CU_ASSERT(range->length == 10);
5271 : 5 : CU_ASSERT(range->owner_ch == NULL);
5272 : 5 : range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges);
5273 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(range != NULL);
5274 : 5 : CU_ASSERT(range->offset == 20);
5275 : 5 : CU_ASSERT(range->length == 10);
5276 : 5 : CU_ASSERT(range->owner_ch == NULL);
5277 : :
5278 : : /* Unlocks must exactly match a lock. */
5279 : 5 : g_unlock_lba_range_done = false;
5280 : 5 : rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 1, bdev_unquiesce_done, &ctx1);
5281 : 5 : CU_ASSERT(rc == -EINVAL);
5282 [ - + ]: 5 : CU_ASSERT(g_unlock_lba_range_done == false);
5283 : :
5284 : 5 : rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_unquiesce_done, &ctx1);
5285 : 5 : CU_ASSERT(rc == 0);
5286 : 5 : spdk_delay_us(100);
5287 : 5 : poll_threads();
5288 : :
5289 [ - + ]: 5 : CU_ASSERT(g_unlock_lba_range_done == true);
5290 : 5 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5291 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5292 : :
5293 : : /* Test unquiesce from quiesce cb */
5294 : 5 : g_lock_lba_range_done = false;
5295 : 5 : g_unlock_lba_range_done = false;
5296 : 5 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done_unquiesce, bdev);
5297 : 5 : CU_ASSERT(rc == 0);
5298 : 5 : poll_threads();
5299 : :
5300 [ - + ]: 5 : CU_ASSERT(g_lock_lba_range_done == true);
5301 [ - + ]: 5 : CU_ASSERT(g_unlock_lba_range_done == true);
5302 : :
5303 : : /* Test quiesce with read I/O */
5304 : 5 : g_lock_lba_range_done = false;
5305 : 5 : g_unlock_lba_range_done = false;
5306 : 5 : g_io_done = false;
5307 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1);
5308 : 5 : CU_ASSERT(rc == 0);
5309 : :
5310 : 5 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1);
5311 : 5 : CU_ASSERT(rc == 0);
5312 : 5 : poll_threads();
5313 : :
5314 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5315 [ - + ]: 5 : CU_ASSERT(g_lock_lba_range_done == false);
5316 : 5 : range = TAILQ_FIRST(&channel->locked_ranges);
5317 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(range != NULL);
5318 : :
5319 : 5 : stub_complete_io(1);
5320 : 5 : spdk_delay_us(100);
5321 : 5 : poll_threads();
5322 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5323 [ - + ]: 5 : CU_ASSERT(g_lock_lba_range_done == true);
5324 : 5 : CU_ASSERT(TAILQ_EMPTY(&channel->io_locked));
5325 : :
5326 : 5 : g_io_done = false;
5327 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1);
5328 : 5 : CU_ASSERT(rc == 0);
5329 : :
5330 : 5 : bdev_io = TAILQ_FIRST(&channel->io_locked);
5331 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
5332 : 5 : CU_ASSERT(bdev_io->u.bdev.offset_blocks == 20);
5333 : 5 : CU_ASSERT(bdev_io->u.bdev.num_blocks == 1);
5334 : :
5335 : 5 : rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1);
5336 : 5 : CU_ASSERT(rc == 0);
5337 : 5 : spdk_delay_us(100);
5338 : 5 : poll_threads();
5339 : :
5340 [ - + ]: 5 : CU_ASSERT(g_unlock_lba_range_done == true);
5341 : 5 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5342 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5343 : :
5344 : 5 : CU_ASSERT(TAILQ_EMPTY(&channel->io_locked));
5345 : 5 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
5346 : 5 : poll_threads();
5347 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5348 : :
5349 : 5 : spdk_put_io_channel(io_ch);
5350 : 5 : spdk_bdev_close(desc);
5351 : 5 : free_bdev(bdev);
5352 : 5 : ut_fini_bdev();
5353 : 5 : }
5354 : :
5355 : : static void
5356 : 30 : abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
5357 : : {
5358 : 30 : g_abort_done = true;
5359 : 30 : g_abort_status = bdev_io->internal.status;
5360 : 30 : spdk_bdev_free_io(bdev_io);
5361 : 30 : }
5362 : :
5363 : : static void
5364 : 5 : bdev_io_abort(void)
5365 : : {
5366 : : struct spdk_bdev *bdev;
5367 : 5 : struct spdk_bdev_desc *desc = NULL;
5368 : : struct spdk_io_channel *io_ch;
5369 : : struct spdk_bdev_channel *channel;
5370 : : struct spdk_bdev_mgmt_channel *mgmt_ch;
5371 : 5 : struct spdk_bdev_opts bdev_opts = {};
5372 : 4 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
5373 : 5 : uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
5374 : : int rc;
5375 : :
5376 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
5377 : 5 : bdev_opts.bdev_io_pool_size = 7;
5378 : 5 : bdev_opts.bdev_io_cache_size = 2;
5379 : 5 : ut_init_bdev(&bdev_opts);
5380 : :
5381 : 5 : bdev = allocate_bdev("bdev0");
5382 : :
5383 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5384 : 5 : CU_ASSERT(rc == 0);
5385 : 5 : CU_ASSERT(desc != NULL);
5386 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5387 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
5388 : 5 : CU_ASSERT(io_ch != NULL);
5389 : 5 : channel = spdk_io_channel_get_ctx(io_ch);
5390 : 5 : mgmt_ch = channel->shared_resource->mgmt_ch;
5391 : :
5392 : 5 : g_abort_done = false;
5393 : :
5394 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);
5395 : :
5396 : 5 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5397 : 5 : CU_ASSERT(rc == -ENOTSUP);
5398 : :
5399 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);
5400 : :
5401 : 5 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
5402 : 5 : CU_ASSERT(rc == 0);
5403 [ - + ]: 5 : CU_ASSERT(g_abort_done == true);
5404 : 5 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);
5405 : :
5406 : : /* Test the case that the target I/O was successfully aborted. */
5407 : 5 : g_io_done = false;
5408 : :
5409 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
5410 : 5 : CU_ASSERT(rc == 0);
5411 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5412 : :
5413 : 5 : g_abort_done = false;
5414 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5415 : :
5416 : 5 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5417 : 5 : CU_ASSERT(rc == 0);
5418 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5419 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5420 : 5 : stub_complete_io(1);
5421 [ - + ]: 5 : CU_ASSERT(g_abort_done == true);
5422 : 5 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5423 : :
5424 : : /* Test the case that the target I/O was not aborted because it completed
5425 : : * in the middle of execution of the abort.
5426 : : */
5427 : 5 : g_io_done = false;
5428 : :
5429 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
5430 : 5 : CU_ASSERT(rc == 0);
5431 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5432 : :
5433 : 5 : g_abort_done = false;
5434 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
5435 : :
5436 : 5 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5437 : 5 : CU_ASSERT(rc == 0);
5438 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5439 : :
5440 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5441 : 5 : stub_complete_io(1);
5442 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5443 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5444 : :
5445 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
5446 : 5 : stub_complete_io(1);
5447 [ - + ]: 5 : CU_ASSERT(g_abort_done == true);
5448 : 5 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5449 : :
5450 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5451 : :
5452 : 5 : bdev->optimal_io_boundary = 16;
5453 : 5 : bdev->split_on_optimal_io_boundary = true;
5454 : :
5455 : : /* Test that a single-vector command which is split is aborted correctly.
5456 : : * Offset 14, length 8, payload 0xF000
5457 : : * Child - Offset 14, length 2, payload 0xF000
5458 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
5459 : : */
5460 : 5 : g_io_done = false;
5461 : :
5462 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
5463 : 5 : CU_ASSERT(rc == 0);
5464 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5465 : :
5466 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5467 : :
5468 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5469 : :
5470 : 5 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5471 : 5 : CU_ASSERT(rc == 0);
5472 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5473 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5474 : 5 : stub_complete_io(2);
5475 [ - + ]: 5 : CU_ASSERT(g_abort_done == true);
5476 : 5 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5477 : :
5478 : : /* Test that a multi-vector command that needs to be split by strip and then
5479 : : * needs to be split is aborted correctly. Abort is requested before the second
5480 : : * child I/O was submitted. The parent I/O should complete with failure without
5481 : : * submitting the second child I/O.
5482 : : */
5483 [ + + ]: 325 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
5484 : 320 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
5485 : 320 : iov[i].iov_len = 512;
5486 : : }
5487 : :
5488 : 5 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
5489 : 5 : g_io_done = false;
5490 : 5 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
5491 : : SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
5492 : 5 : CU_ASSERT(rc == 0);
5493 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5494 : :
5495 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5496 : :
5497 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5498 : :
5499 : 5 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5500 : 5 : CU_ASSERT(rc == 0);
5501 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5502 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5503 : 5 : stub_complete_io(1);
5504 [ - + ]: 5 : CU_ASSERT(g_abort_done == true);
5505 : 5 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5506 : :
5507 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5508 : :
5509 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5510 : :
5511 : 5 : bdev->optimal_io_boundary = 16;
5512 : 5 : g_io_done = false;
5513 : :
5514 : : /* Test that a ingle-vector command which is split is aborted correctly.
5515 : : * Differently from the above, the child abort request will be submitted
5516 : : * sequentially due to the capacity of spdk_bdev_io.
5517 : : */
5518 : 5 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
5519 : 5 : CU_ASSERT(rc == 0);
5520 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5521 : :
5522 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
5523 : :
5524 : 5 : g_abort_done = false;
5525 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5526 : :
5527 : 5 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5528 : 5 : CU_ASSERT(rc == 0);
5529 : 5 : CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
5530 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
5531 : :
5532 : 5 : stub_complete_io(1);
5533 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5534 : 5 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5535 : 5 : stub_complete_io(3);
5536 [ - + ]: 5 : CU_ASSERT(g_abort_done == true);
5537 : 5 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5538 : :
5539 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5540 : :
5541 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5542 : :
5543 : 5 : spdk_put_io_channel(io_ch);
5544 : 5 : spdk_bdev_close(desc);
5545 : 5 : free_bdev(bdev);
5546 : 5 : ut_fini_bdev();
5547 : 5 : }
5548 : :
5549 : : static void
5550 : 5 : bdev_unmap(void)
5551 : : {
5552 : : struct spdk_bdev *bdev;
5553 : 5 : struct spdk_bdev_desc *desc = NULL;
5554 : : struct spdk_io_channel *ioch;
5555 : : struct spdk_bdev_channel *bdev_ch;
5556 : : struct ut_expected_io *expected_io;
5557 : 5 : struct spdk_bdev_opts bdev_opts = {};
5558 : : uint32_t i, num_outstanding;
5559 : : uint64_t offset, num_blocks, max_unmap_blocks, num_children;
5560 : : int rc;
5561 : :
5562 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
5563 : 5 : bdev_opts.bdev_io_pool_size = 512;
5564 : 5 : bdev_opts.bdev_io_cache_size = 64;
5565 : 5 : ut_init_bdev(&bdev_opts);
5566 : :
5567 : 5 : bdev = allocate_bdev("bdev");
5568 : :
5569 : 5 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
5570 : 5 : CU_ASSERT_EQUAL(rc, 0);
5571 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5572 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5573 : 5 : ioch = spdk_bdev_get_io_channel(desc);
5574 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
5575 : 5 : bdev_ch = spdk_io_channel_get_ctx(ioch);
5576 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
5577 : :
5578 : 5 : fn_table.submit_request = stub_submit_request;
5579 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5580 : :
5581 : : /* Case 1: First test the request won't be split */
5582 : 5 : num_blocks = 32;
5583 : :
5584 : 5 : g_io_done = false;
5585 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0);
5586 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5587 : 5 : rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5588 : 5 : CU_ASSERT_EQUAL(rc, 0);
5589 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5590 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5591 : 5 : stub_complete_io(1);
5592 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5593 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5594 : :
5595 : : /* Case 2: Test the split with 2 children requests */
5596 : 5 : bdev->max_unmap = 8;
5597 : 5 : bdev->max_unmap_segments = 2;
5598 : 5 : max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments;
5599 : 5 : num_blocks = max_unmap_blocks * 2;
5600 : 5 : offset = 0;
5601 : :
5602 : 5 : g_io_done = false;
5603 [ + + ]: 15 : for (i = 0; i < 2; i++) {
5604 : 10 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
5605 : 10 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5606 : 10 : offset += max_unmap_blocks;
5607 : : }
5608 : :
5609 : 5 : rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5610 : 5 : CU_ASSERT_EQUAL(rc, 0);
5611 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5612 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5613 : 5 : stub_complete_io(2);
5614 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5615 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5616 : :
5617 : : /* Case 3: Test the split with 15 children requests, will finish 8 requests first */
5618 : 5 : num_children = 15;
5619 : 5 : num_blocks = max_unmap_blocks * num_children;
5620 : 5 : g_io_done = false;
5621 : 5 : offset = 0;
5622 [ + + ]: 80 : for (i = 0; i < num_children; i++) {
5623 : 75 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
5624 : 75 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5625 : 75 : offset += max_unmap_blocks;
5626 : : }
5627 : :
5628 : 5 : rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5629 : 5 : CU_ASSERT_EQUAL(rc, 0);
5630 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5631 : :
5632 [ + + ]: 15 : while (num_children > 0) {
5633 : 10 : num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
5634 : 10 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
5635 : 10 : stub_complete_io(num_outstanding);
5636 : 10 : num_children -= num_outstanding;
5637 : : }
5638 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5639 : :
5640 : 5 : spdk_put_io_channel(ioch);
5641 : 5 : spdk_bdev_close(desc);
5642 : 5 : free_bdev(bdev);
5643 : 5 : ut_fini_bdev();
5644 : 5 : }
5645 : :
5646 : : static void
5647 : 5 : bdev_write_zeroes_split_test(void)
5648 : : {
5649 : : struct spdk_bdev *bdev;
5650 : 5 : struct spdk_bdev_desc *desc = NULL;
5651 : : struct spdk_io_channel *ioch;
5652 : : struct spdk_bdev_channel *bdev_ch;
5653 : : struct ut_expected_io *expected_io;
5654 : 5 : struct spdk_bdev_opts bdev_opts = {};
5655 : : uint32_t i, num_outstanding;
5656 : : uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children;
5657 : : int rc;
5658 : :
5659 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
5660 : 5 : bdev_opts.bdev_io_pool_size = 512;
5661 : 5 : bdev_opts.bdev_io_cache_size = 64;
5662 : 5 : ut_init_bdev(&bdev_opts);
5663 : :
5664 : 5 : bdev = allocate_bdev("bdev");
5665 : :
5666 : 5 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
5667 : 5 : CU_ASSERT_EQUAL(rc, 0);
5668 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5669 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5670 : 5 : ioch = spdk_bdev_get_io_channel(desc);
5671 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
5672 : 5 : bdev_ch = spdk_io_channel_get_ctx(ioch);
5673 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
5674 : :
5675 : 5 : fn_table.submit_request = stub_submit_request;
5676 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5677 : :
5678 : : /* Case 1: First test the request won't be split */
5679 : 5 : num_blocks = 32;
5680 : :
5681 : 5 : g_io_done = false;
5682 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
5683 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5684 : 5 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5685 : 5 : CU_ASSERT_EQUAL(rc, 0);
5686 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5687 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5688 : 5 : stub_complete_io(1);
5689 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5690 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5691 : :
5692 : : /* Case 2: Test the split with 2 children requests */
5693 : 5 : max_write_zeroes_blocks = 8;
5694 : 5 : bdev->max_write_zeroes = max_write_zeroes_blocks;
5695 : 5 : num_blocks = max_write_zeroes_blocks * 2;
5696 : 5 : offset = 0;
5697 : :
5698 : 5 : g_io_done = false;
5699 [ + + ]: 15 : for (i = 0; i < 2; i++) {
5700 : 10 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
5701 : : 0);
5702 : 10 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5703 : 10 : offset += max_write_zeroes_blocks;
5704 : : }
5705 : :
5706 : 5 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5707 : 5 : CU_ASSERT_EQUAL(rc, 0);
5708 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5709 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5710 : 5 : stub_complete_io(2);
5711 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5712 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5713 : :
5714 : : /* Case 3: Test the split with 15 children requests, will finish 8 requests first */
5715 : 5 : num_children = 15;
5716 : 5 : num_blocks = max_write_zeroes_blocks * num_children;
5717 : 5 : g_io_done = false;
5718 : 5 : offset = 0;
5719 [ + + ]: 80 : for (i = 0; i < num_children; i++) {
5720 : 75 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
5721 : : 0);
5722 : 75 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5723 : 75 : offset += max_write_zeroes_blocks;
5724 : : }
5725 : :
5726 : 5 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5727 : 5 : CU_ASSERT_EQUAL(rc, 0);
5728 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5729 : :
5730 [ + + ]: 15 : while (num_children > 0) {
5731 : 10 : num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
5732 : 10 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
5733 : 10 : stub_complete_io(num_outstanding);
5734 : 10 : num_children -= num_outstanding;
5735 : : }
5736 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
5737 : :
5738 : 5 : spdk_put_io_channel(ioch);
5739 : 5 : spdk_bdev_close(desc);
5740 : 5 : free_bdev(bdev);
5741 : 5 : ut_fini_bdev();
5742 : 5 : }
5743 : :
5744 : : static void
5745 : 5 : bdev_set_options_test(void)
5746 : : {
5747 : 5 : struct spdk_bdev_opts bdev_opts = {};
5748 : : int rc;
5749 : :
5750 : : /* Case1: Do not set opts_size */
5751 : 5 : rc = spdk_bdev_set_opts(&bdev_opts);
5752 : 5 : CU_ASSERT(rc == -1);
5753 : 5 : }
5754 : :
5755 : : static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d;
5756 : :
5757 : : static int
5758 : 15 : test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains,
5759 : : int array_size)
5760 : : {
5761 [ + + + + ]: 15 : if (array_size > 0 && domains) {
5762 : 5 : domains[0] = g_bdev_memory_domain;
5763 : : }
5764 : :
5765 : 15 : return 1;
5766 : : }
5767 : :
5768 : : static void
5769 : 5 : bdev_get_memory_domains(void)
5770 : : {
5771 : 5 : struct spdk_bdev_fn_table fn_table = {
5772 : : .get_memory_domains = test_bdev_get_supported_dma_device_types_op
5773 : : };
5774 : 5 : struct spdk_bdev bdev = { .fn_table = &fn_table };
5775 : 5 : struct spdk_memory_domain *domains[2] = {};
5776 : : int rc;
5777 : :
5778 : : /* bdev is NULL */
5779 : 5 : rc = spdk_bdev_get_memory_domains(NULL, domains, 2);
5780 : 5 : CU_ASSERT(rc == -EINVAL);
5781 : :
5782 : : /* domains is NULL */
5783 : 5 : rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2);
5784 : 5 : CU_ASSERT(rc == 1);
5785 : :
5786 : : /* array size is 0 */
5787 : 5 : rc = spdk_bdev_get_memory_domains(&bdev, domains, 0);
5788 : 5 : CU_ASSERT(rc == 1);
5789 : :
5790 : : /* get_supported_dma_device_types op is set */
5791 : 5 : rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
5792 : 5 : CU_ASSERT(rc == 1);
5793 : 5 : CU_ASSERT(domains[0] == g_bdev_memory_domain);
5794 : :
5795 : : /* get_supported_dma_device_types op is not set */
5796 : 5 : fn_table.get_memory_domains = NULL;
5797 : 5 : rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
5798 : 5 : CU_ASSERT(rc == 0);
5799 : 5 : }
5800 : :
5801 : : static void
5802 : 10 : _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts)
5803 : : {
5804 : : struct spdk_bdev *bdev;
5805 : 10 : struct spdk_bdev_desc *desc = NULL;
5806 : : struct spdk_io_channel *io_ch;
5807 : 8 : char io_buf[512];
5808 : 10 : struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
5809 : : struct ut_expected_io *expected_io;
5810 : : int rc;
5811 : :
5812 : 10 : ut_init_bdev(NULL);
5813 : :
5814 : 10 : bdev = allocate_bdev("bdev0");
5815 : 10 : bdev->md_interleave = false;
5816 : 10 : bdev->md_len = 8;
5817 : :
5818 : 10 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5819 : 10 : CU_ASSERT(rc == 0);
5820 [ - + ]: 10 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5821 : 10 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5822 : 10 : io_ch = spdk_bdev_get_io_channel(desc);
5823 : 10 : CU_ASSERT(io_ch != NULL);
5824 : :
5825 : : /* read */
5826 : 10 : g_io_done = false;
5827 : 10 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
5828 [ + + ]: 10 : if (ext_io_opts) {
5829 : 5 : expected_io->md_buf = ext_io_opts->metadata;
5830 : : }
5831 : 10 : ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
5832 : 10 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5833 : :
5834 : 10 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts);
5835 : :
5836 : 10 : CU_ASSERT(rc == 0);
5837 [ - + ]: 10 : CU_ASSERT(g_io_done == false);
5838 : 10 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5839 : 10 : stub_complete_io(1);
5840 [ - + ]: 10 : CU_ASSERT(g_io_done == true);
5841 : :
5842 : : /* write */
5843 : 10 : g_io_done = false;
5844 : 10 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
5845 [ + + ]: 10 : if (ext_io_opts) {
5846 : 5 : expected_io->md_buf = ext_io_opts->metadata;
5847 : : }
5848 : 10 : ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
5849 : 10 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5850 : :
5851 : 10 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts);
5852 : :
5853 : 10 : CU_ASSERT(rc == 0);
5854 [ - + ]: 10 : CU_ASSERT(g_io_done == false);
5855 : 10 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5856 : 10 : stub_complete_io(1);
5857 [ - + ]: 10 : CU_ASSERT(g_io_done == true);
5858 : :
5859 : 10 : spdk_put_io_channel(io_ch);
5860 : 10 : spdk_bdev_close(desc);
5861 : 10 : free_bdev(bdev);
5862 : 10 : ut_fini_bdev();
5863 : :
5864 : 10 : }
5865 : :
5866 : : static void
5867 : 5 : bdev_io_ext(void)
5868 : : {
5869 : 5 : struct spdk_bdev_ext_io_opts ext_io_opts = {
5870 : : .metadata = (void *)0xFF000000,
5871 : : .size = sizeof(ext_io_opts),
5872 : : .dif_check_flags_exclude_mask = 0
5873 : : };
5874 : :
5875 : 5 : _bdev_io_ext(&ext_io_opts);
5876 : 5 : }
5877 : :
5878 : : static void
5879 : 5 : bdev_io_ext_no_opts(void)
5880 : : {
5881 : 5 : _bdev_io_ext(NULL);
5882 : 5 : }
5883 : :
5884 : : static void
5885 : 5 : bdev_io_ext_invalid_opts(void)
5886 : : {
5887 : : struct spdk_bdev *bdev;
5888 : 5 : struct spdk_bdev_desc *desc = NULL;
5889 : : struct spdk_io_channel *io_ch;
5890 : 4 : char io_buf[512];
5891 : 5 : struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
5892 : 5 : struct spdk_bdev_ext_io_opts ext_io_opts = {
5893 : : .metadata = (void *)0xFF000000,
5894 : : .size = sizeof(ext_io_opts),
5895 : : .dif_check_flags_exclude_mask = 0
5896 : : };
5897 : : int rc;
5898 : :
5899 : 5 : ut_init_bdev(NULL);
5900 : :
5901 : 5 : bdev = allocate_bdev("bdev0");
5902 : 5 : bdev->md_interleave = false;
5903 : 5 : bdev->md_len = 8;
5904 : :
5905 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5906 : 5 : CU_ASSERT(rc == 0);
5907 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5908 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5909 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
5910 : 5 : CU_ASSERT(io_ch != NULL);
5911 : :
5912 : : /* Test invalid ext_opts size */
5913 : 5 : ext_io_opts.size = 0;
5914 : 5 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5915 : 5 : CU_ASSERT(rc == -EINVAL);
5916 : 5 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5917 : 5 : CU_ASSERT(rc == -EINVAL);
5918 : :
5919 : 5 : ext_io_opts.size = sizeof(ext_io_opts) * 2;
5920 : 5 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5921 : 5 : CU_ASSERT(rc == -EINVAL);
5922 : 5 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5923 : 5 : CU_ASSERT(rc == -EINVAL);
5924 : :
5925 : 5 : ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) +
5926 : : sizeof(ext_io_opts.metadata) - 1;
5927 : 5 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5928 : 5 : CU_ASSERT(rc == -EINVAL);
5929 : 5 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
5930 : 5 : CU_ASSERT(rc == -EINVAL);
5931 : :
5932 : 5 : spdk_put_io_channel(io_ch);
5933 : 5 : spdk_bdev_close(desc);
5934 : 5 : free_bdev(bdev);
5935 : 5 : ut_fini_bdev();
5936 : 5 : }
5937 : :
5938 : : static void
5939 : 5 : bdev_io_ext_split(void)
5940 : : {
5941 : : struct spdk_bdev *bdev;
5942 : 5 : struct spdk_bdev_desc *desc = NULL;
5943 : : struct spdk_io_channel *io_ch;
5944 : 4 : char io_buf[512];
5945 : 5 : struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
5946 : : struct ut_expected_io *expected_io;
5947 : 5 : struct spdk_bdev_ext_io_opts ext_io_opts = {
5948 : : .metadata = (void *)0xFF000000,
5949 : : .size = sizeof(ext_io_opts),
5950 : : .dif_check_flags_exclude_mask = 0
5951 : : };
5952 : : int rc;
5953 : :
5954 : 5 : ut_init_bdev(NULL);
5955 : :
5956 : 5 : bdev = allocate_bdev("bdev0");
5957 : 5 : bdev->md_interleave = false;
5958 : 5 : bdev->md_len = 8;
5959 : :
5960 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5961 : 5 : CU_ASSERT(rc == 0);
5962 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5963 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5964 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
5965 : 5 : CU_ASSERT(io_ch != NULL);
5966 : :
5967 : : /* Check that IO request with ext_opts and metadata is split correctly
5968 : : * Offset 14, length 8, payload 0xF000
5969 : : * Child - Offset 14, length 2, payload 0xF000
5970 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
5971 : : */
5972 : 5 : bdev->optimal_io_boundary = 16;
5973 : 5 : bdev->split_on_optimal_io_boundary = true;
5974 : 5 : bdev->md_interleave = false;
5975 : 5 : bdev->md_len = 8;
5976 : :
5977 : 5 : iov.iov_base = (void *)0xF000;
5978 : 5 : iov.iov_len = 4096;
5979 [ - + ]: 5 : memset(&ext_io_opts, 0, sizeof(ext_io_opts));
5980 : 5 : ext_io_opts.metadata = (void *)0xFF000000;
5981 : 5 : ext_io_opts.size = sizeof(ext_io_opts);
5982 : 5 : g_io_done = false;
5983 : :
5984 : : /* read */
5985 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
5986 : 5 : expected_io->md_buf = ext_io_opts.metadata;
5987 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
5988 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5989 : :
5990 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
5991 : 5 : expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
5992 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
5993 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5994 : :
5995 : 5 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
5996 : 5 : CU_ASSERT(rc == 0);
5997 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
5998 : :
5999 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
6000 : 5 : stub_complete_io(2);
6001 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
6002 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6003 : :
6004 : : /* write */
6005 : 5 : g_io_done = false;
6006 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
6007 : 5 : expected_io->md_buf = ext_io_opts.metadata;
6008 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
6009 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6010 : :
6011 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1);
6012 : 5 : expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
6013 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
6014 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6015 : :
6016 : 5 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
6017 : 5 : CU_ASSERT(rc == 0);
6018 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
6019 : :
6020 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
6021 : 5 : stub_complete_io(2);
6022 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
6023 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6024 : :
6025 : 5 : spdk_put_io_channel(io_ch);
6026 : 5 : spdk_bdev_close(desc);
6027 : 5 : free_bdev(bdev);
6028 : 5 : ut_fini_bdev();
6029 : 5 : }
6030 : :
6031 : : static void
6032 : 5 : bdev_io_ext_bounce_buffer(void)
6033 : : {
6034 : : struct spdk_bdev *bdev;
6035 : 5 : struct spdk_bdev_desc *desc = NULL;
6036 : : struct spdk_io_channel *io_ch;
6037 : 4 : char io_buf[512];
6038 : 5 : struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
6039 : : struct ut_expected_io *expected_io, *aux_io;
6040 : 5 : struct spdk_bdev_ext_io_opts ext_io_opts = {
6041 : : .metadata = (void *)0xFF000000,
6042 : : .size = sizeof(ext_io_opts),
6043 : : .dif_check_flags_exclude_mask = 0
6044 : : };
6045 : : int rc;
6046 : :
6047 : 5 : ut_init_bdev(NULL);
6048 : :
6049 : 5 : bdev = allocate_bdev("bdev0");
6050 : 5 : bdev->md_interleave = false;
6051 : 5 : bdev->md_len = 8;
6052 : :
6053 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6054 : 5 : CU_ASSERT(rc == 0);
6055 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6056 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
6057 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
6058 : 5 : CU_ASSERT(io_ch != NULL);
6059 : :
6060 : : /* Verify data pull/push
6061 : : * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */
6062 : 5 : ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef;
6063 : :
6064 : : /* read */
6065 : 5 : g_io_done = false;
6066 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
6067 : 5 : ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
6068 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6069 : :
6070 : 5 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
6071 : :
6072 : 5 : CU_ASSERT(rc == 0);
6073 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
6074 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6075 : 5 : stub_complete_io(1);
6076 [ - + ]: 5 : CU_ASSERT(g_memory_domain_push_data_called == true);
6077 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
6078 : :
6079 : : /* write */
6080 : 5 : g_io_done = false;
6081 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
6082 : 5 : ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
6083 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6084 : :
6085 : 5 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
6086 : :
6087 : 5 : CU_ASSERT(rc == 0);
6088 [ - + ]: 5 : CU_ASSERT(g_memory_domain_pull_data_called == true);
6089 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
6090 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6091 : 5 : stub_complete_io(1);
6092 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
6093 : :
6094 : : /* Verify the request is queued after receiving ENOMEM from pull */
6095 : 5 : g_io_done = false;
6096 : 5 : aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
6097 : 5 : ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len);
6098 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link);
6099 : 5 : rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL);
6100 : 5 : CU_ASSERT(rc == 0);
6101 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
6102 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6103 : :
6104 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
6105 : 5 : ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
6106 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6107 : :
6108 : 5 : MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM);
6109 : 5 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
6110 : 5 : CU_ASSERT(rc == 0);
6111 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
6112 : : /* The second IO has been queued */
6113 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6114 : :
6115 [ - - - + ]: 5 : MOCK_CLEAR(spdk_memory_domain_pull_data);
6116 : 5 : g_memory_domain_pull_data_called = false;
6117 : 5 : stub_complete_io(1);
6118 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
6119 [ - + ]: 5 : CU_ASSERT(g_memory_domain_pull_data_called == true);
6120 : : /* The second IO should be submitted now */
6121 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6122 : 5 : g_io_done = false;
6123 : 5 : stub_complete_io(1);
6124 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
6125 : :
6126 : : /* Verify the request is queued after receiving ENOMEM from push */
6127 : 5 : g_io_done = false;
6128 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
6129 : 5 : ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
6130 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6131 : :
6132 : 5 : MOCK_SET(spdk_memory_domain_push_data, -ENOMEM);
6133 : 5 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
6134 : 5 : CU_ASSERT(rc == 0);
6135 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
6136 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6137 : :
6138 : 5 : aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
6139 : 5 : ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len);
6140 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link);
6141 : 5 : rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL);
6142 : 5 : CU_ASSERT(rc == 0);
6143 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
6144 : :
6145 : 5 : stub_complete_io(1);
6146 : : /* The IO isn't done yet, it's still waiting on push */
6147 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
6148 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6149 [ - - - + ]: 5 : MOCK_CLEAR(spdk_memory_domain_push_data);
6150 : 5 : g_memory_domain_push_data_called = false;
6151 : : /* Completing the second IO should also trigger push on the first one */
6152 : 5 : stub_complete_io(1);
6153 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
6154 [ - + ]: 5 : CU_ASSERT(g_memory_domain_push_data_called == true);
6155 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6156 : :
6157 : 5 : spdk_put_io_channel(io_ch);
6158 : 5 : spdk_bdev_close(desc);
6159 : 5 : free_bdev(bdev);
6160 : 5 : ut_fini_bdev();
6161 : 5 : }
6162 : :
6163 : : static void
6164 : 5 : bdev_register_uuid_alias(void)
6165 : : {
6166 : : struct spdk_bdev *bdev, *second;
6167 : 4 : char uuid[SPDK_UUID_STRING_LEN];
6168 : : int rc;
6169 : :
6170 : 5 : ut_init_bdev(NULL);
6171 : 5 : bdev = allocate_bdev("bdev0");
6172 : :
6173 : : /* Make sure an UUID was generated */
6174 : 5 : CU_ASSERT_FALSE(spdk_uuid_is_null(&bdev->uuid));
6175 : :
6176 : : /* Check that an UUID alias was registered */
6177 : 5 : spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
6178 : 5 : CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
6179 : :
6180 : : /* Unregister the bdev */
6181 : 5 : spdk_bdev_unregister(bdev, NULL, NULL);
6182 : 5 : poll_threads();
6183 : 5 : CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));
6184 : :
6185 : : /* Check the same, but this time register the bdev with non-zero UUID */
6186 : 5 : rc = spdk_bdev_register(bdev);
6187 : 5 : CU_ASSERT_EQUAL(rc, 0);
6188 : 5 : CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
6189 : :
6190 : : /* Unregister the bdev */
6191 : 5 : spdk_bdev_unregister(bdev, NULL, NULL);
6192 : 5 : poll_threads();
6193 : 5 : CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));
6194 : :
6195 : : /* Register the bdev using UUID as the name */
6196 : 5 : bdev->name = uuid;
6197 : 5 : rc = spdk_bdev_register(bdev);
6198 : 5 : CU_ASSERT_EQUAL(rc, 0);
6199 : 5 : CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
6200 : :
6201 : : /* Unregister the bdev */
6202 : 5 : spdk_bdev_unregister(bdev, NULL, NULL);
6203 : 5 : poll_threads();
6204 : 5 : CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));
6205 : :
6206 : : /* Check that it's not possible to register two bdevs with the same UUIDs */
6207 : 5 : bdev->name = "bdev0";
6208 : 5 : second = allocate_bdev("bdev1");
6209 : 5 : spdk_uuid_copy(&bdev->uuid, &second->uuid);
6210 : 5 : rc = spdk_bdev_register(bdev);
6211 : 5 : CU_ASSERT_EQUAL(rc, -EEXIST);
6212 : :
6213 : : /* Regenerate the UUID and re-check */
6214 : 5 : spdk_uuid_generate(&bdev->uuid);
6215 : 5 : rc = spdk_bdev_register(bdev);
6216 : 5 : CU_ASSERT_EQUAL(rc, 0);
6217 : :
6218 : : /* And check that both bdevs can be retrieved through their UUIDs */
6219 : 5 : spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
6220 : 5 : CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
6221 : 5 : spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid);
6222 : 5 : CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second);
6223 : :
6224 : 5 : free_bdev(second);
6225 : 5 : free_bdev(bdev);
6226 : 5 : ut_fini_bdev();
6227 : 5 : }
6228 : :
6229 : : static void
6230 : 5 : bdev_unregister_by_name(void)
6231 : : {
6232 : : struct spdk_bdev *bdev;
6233 : : int rc;
6234 : :
6235 : 5 : bdev = allocate_bdev("bdev");
6236 : :
6237 : 5 : g_event_type1 = 0xFF;
6238 : 5 : g_unregister_arg = NULL;
6239 : 5 : g_unregister_rc = -1;
6240 : :
6241 : 5 : rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
6242 : 5 : CU_ASSERT(rc == -ENODEV);
6243 : :
6244 : 5 : rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
6245 : 5 : CU_ASSERT(rc == -ENODEV);
6246 : :
6247 : 5 : rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
6248 : 5 : CU_ASSERT(rc == 0);
6249 : :
6250 : : /* Check that unregister callback is delayed */
6251 : 5 : CU_ASSERT(g_unregister_arg == NULL);
6252 : 5 : CU_ASSERT(g_unregister_rc == -1);
6253 : :
6254 : 5 : poll_threads();
6255 : :
6256 : : /* Event callback shall not be issued because device was closed */
6257 : 5 : CU_ASSERT(g_event_type1 == 0xFF);
6258 : : /* Unregister callback is issued */
6259 : 5 : CU_ASSERT(g_unregister_arg == (void *)0x12345678);
6260 : 5 : CU_ASSERT(g_unregister_rc == 0);
6261 : :
6262 : 5 : free_bdev(bdev);
6263 : 5 : }
6264 : :
6265 : : static int
6266 : 55 : count_bdevs(void *ctx, struct spdk_bdev *bdev)
6267 : : {
6268 : 55 : int *count = ctx;
6269 : :
6270 : 55 : (*count)++;
6271 : :
6272 : 55 : return 0;
6273 : : }
6274 : :
6275 : : static void
6276 : 5 : for_each_bdev_test(void)
6277 : : {
6278 : : struct spdk_bdev *bdev[8];
6279 : 4 : int rc, count;
6280 : :
6281 : 5 : bdev[0] = allocate_bdev("bdev0");
6282 : 5 : bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING;
6283 : :
6284 : 5 : bdev[1] = allocate_bdev("bdev1");
6285 : 5 : rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
6286 : 5 : CU_ASSERT(rc == 0);
6287 : :
6288 : 5 : bdev[2] = allocate_bdev("bdev2");
6289 : :
6290 : 5 : bdev[3] = allocate_bdev("bdev3");
6291 : 5 : rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
6292 : 5 : CU_ASSERT(rc == 0);
6293 : :
6294 : 5 : bdev[4] = allocate_bdev("bdev4");
6295 : :
6296 : 5 : bdev[5] = allocate_bdev("bdev5");
6297 : 5 : rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
6298 : 5 : CU_ASSERT(rc == 0);
6299 : :
6300 : 5 : bdev[6] = allocate_bdev("bdev6");
6301 : :
6302 : 5 : bdev[7] = allocate_bdev("bdev7");
6303 : :
6304 : 5 : count = 0;
6305 : 5 : rc = spdk_for_each_bdev(&count, count_bdevs);
6306 : 5 : CU_ASSERT(rc == 0);
6307 : 5 : CU_ASSERT(count == 7);
6308 : :
6309 : 5 : count = 0;
6310 : 5 : rc = spdk_for_each_bdev_leaf(&count, count_bdevs);
6311 : 5 : CU_ASSERT(rc == 0);
6312 : 5 : CU_ASSERT(count == 4);
6313 : :
6314 : 5 : bdev[0]->internal.status = SPDK_BDEV_STATUS_READY;
6315 : 5 : free_bdev(bdev[0]);
6316 : 5 : free_bdev(bdev[1]);
6317 : 5 : free_bdev(bdev[2]);
6318 : 5 : free_bdev(bdev[3]);
6319 : 5 : free_bdev(bdev[4]);
6320 : 5 : free_bdev(bdev[5]);
6321 : 5 : free_bdev(bdev[6]);
6322 : 5 : free_bdev(bdev[7]);
6323 : 5 : }
6324 : :
6325 : : static void
6326 : 5 : bdev_seek_test(void)
6327 : : {
6328 : : struct spdk_bdev *bdev;
6329 : 5 : struct spdk_bdev_desc *desc = NULL;
6330 : : struct spdk_io_channel *io_ch;
6331 : : int rc;
6332 : :
6333 : 5 : ut_init_bdev(NULL);
6334 : 5 : poll_threads();
6335 : :
6336 : 5 : bdev = allocate_bdev("bdev0");
6337 : :
6338 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6339 : 5 : CU_ASSERT(rc == 0);
6340 : 5 : poll_threads();
6341 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6342 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
6343 : 5 : io_ch = spdk_bdev_get_io_channel(desc);
6344 : 5 : CU_ASSERT(io_ch != NULL);
6345 : :
6346 : : /* Seek data not supported */
6347 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false);
6348 : 5 : rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL);
6349 : 5 : CU_ASSERT(rc == 0);
6350 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6351 : 5 : poll_threads();
6352 : 5 : CU_ASSERT(g_seek_offset == 0);
6353 : :
6354 : : /* Seek hole not supported */
6355 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false);
6356 : 5 : rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL);
6357 : 5 : CU_ASSERT(rc == 0);
6358 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6359 : 5 : poll_threads();
6360 : 5 : CU_ASSERT(g_seek_offset == UINT64_MAX);
6361 : :
6362 : : /* Seek data supported */
6363 : 5 : g_seek_data_offset = 12345;
6364 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true);
6365 : 5 : rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL);
6366 : 5 : CU_ASSERT(rc == 0);
6367 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6368 : 5 : stub_complete_io(1);
6369 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6370 : 5 : CU_ASSERT(g_seek_offset == 12345);
6371 : :
6372 : : /* Seek hole supported */
6373 : 5 : g_seek_hole_offset = 67890;
6374 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true);
6375 : 5 : rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL);
6376 : 5 : CU_ASSERT(rc == 0);
6377 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6378 : 5 : stub_complete_io(1);
6379 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6380 : 5 : CU_ASSERT(g_seek_offset == 67890);
6381 : :
6382 : 5 : spdk_put_io_channel(io_ch);
6383 : 5 : spdk_bdev_close(desc);
6384 : 5 : free_bdev(bdev);
6385 : 5 : ut_fini_bdev();
6386 : 5 : }
6387 : :
6388 : : static void
6389 : 5 : bdev_copy(void)
6390 : : {
6391 : : struct spdk_bdev *bdev;
6392 : 5 : struct spdk_bdev_desc *desc = NULL;
6393 : : struct spdk_io_channel *ioch;
6394 : : struct ut_expected_io *expected_io;
6395 : : uint64_t src_offset, num_blocks;
6396 : : uint32_t num_completed;
6397 : : int rc;
6398 : :
6399 : 5 : ut_init_bdev(NULL);
6400 : 5 : bdev = allocate_bdev("bdev");
6401 : :
6402 : 5 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
6403 : 5 : CU_ASSERT_EQUAL(rc, 0);
6404 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6405 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
6406 : 5 : ioch = spdk_bdev_get_io_channel(desc);
6407 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
6408 : :
6409 : 5 : fn_table.submit_request = stub_submit_request;
6410 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
6411 : :
6412 : : /* First test that if the bdev supports copy, the request won't be split */
6413 : 5 : bdev->md_len = 0;
6414 : 5 : bdev->blocklen = 512;
6415 : 5 : num_blocks = 128;
6416 : 5 : src_offset = bdev->blockcnt - num_blocks;
6417 : :
6418 : 5 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
6419 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6420 : :
6421 : 5 : rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
6422 : 5 : CU_ASSERT_EQUAL(rc, 0);
6423 : 5 : num_completed = stub_complete_io(1);
6424 : 5 : CU_ASSERT_EQUAL(num_completed, 1);
6425 : :
6426 : : /* Check that if copy is not supported it'll still work */
6427 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0);
6428 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6429 : 5 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0);
6430 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6431 : :
6432 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);
6433 : :
6434 : 5 : rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
6435 : 5 : CU_ASSERT_EQUAL(rc, 0);
6436 : 5 : num_completed = stub_complete_io(1);
6437 : 5 : CU_ASSERT_EQUAL(num_completed, 1);
6438 : 5 : num_completed = stub_complete_io(1);
6439 : 5 : CU_ASSERT_EQUAL(num_completed, 1);
6440 : :
6441 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);
6442 : 5 : spdk_put_io_channel(ioch);
6443 : 5 : spdk_bdev_close(desc);
6444 : 5 : free_bdev(bdev);
6445 : 5 : ut_fini_bdev();
6446 : 5 : }
6447 : :
6448 : : static void
6449 : 5 : bdev_copy_split_test(void)
6450 : : {
6451 : : struct spdk_bdev *bdev;
6452 : 5 : struct spdk_bdev_desc *desc = NULL;
6453 : : struct spdk_io_channel *ioch;
6454 : : struct spdk_bdev_channel *bdev_ch;
6455 : : struct ut_expected_io *expected_io;
6456 : 5 : struct spdk_bdev_opts bdev_opts = {};
6457 : : uint32_t i, num_outstanding;
6458 : : uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children;
6459 : : int rc;
6460 : :
6461 : 5 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
6462 : 5 : bdev_opts.bdev_io_pool_size = 512;
6463 : 5 : bdev_opts.bdev_io_cache_size = 64;
6464 : 5 : rc = spdk_bdev_set_opts(&bdev_opts);
6465 : 5 : CU_ASSERT(rc == 0);
6466 : :
6467 : 5 : ut_init_bdev(NULL);
6468 : 5 : bdev = allocate_bdev("bdev");
6469 : :
6470 : 5 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
6471 : 5 : CU_ASSERT_EQUAL(rc, 0);
6472 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6473 : 5 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
6474 : 5 : ioch = spdk_bdev_get_io_channel(desc);
6475 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
6476 : 5 : bdev_ch = spdk_io_channel_get_ctx(ioch);
6477 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
6478 : :
6479 : 5 : fn_table.submit_request = stub_submit_request;
6480 : 5 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
6481 : :
6482 : : /* Case 1: First test the request won't be split */
6483 : 5 : num_blocks = 32;
6484 : 5 : src_offset = bdev->blockcnt - num_blocks;
6485 : :
6486 : 5 : g_io_done = false;
6487 : 5 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
6488 : 5 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6489 : 5 : rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
6490 : 5 : CU_ASSERT_EQUAL(rc, 0);
6491 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
6492 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6493 : 5 : stub_complete_io(1);
6494 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
6495 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6496 : :
6497 : : /* Case 2: Test the split with 2 children requests */
6498 : 5 : max_copy_blocks = 8;
6499 : 5 : bdev->max_copy = max_copy_blocks;
6500 : 5 : num_children = 2;
6501 : 5 : num_blocks = max_copy_blocks * num_children;
6502 : 5 : offset = 0;
6503 : 5 : src_offset = bdev->blockcnt - num_blocks;
6504 : :
6505 : 5 : g_io_done = false;
6506 [ + + ]: 15 : for (i = 0; i < num_children; i++) {
6507 : 10 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
6508 : : src_offset + offset, max_copy_blocks);
6509 : 10 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6510 : 10 : offset += max_copy_blocks;
6511 : : }
6512 : :
6513 : 5 : rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
6514 : 5 : CU_ASSERT_EQUAL(rc, 0);
6515 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
6516 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children);
6517 : 5 : stub_complete_io(num_children);
6518 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
6519 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6520 : :
6521 : : /* Case 3: Test the split with 15 children requests, will finish 8 requests first */
6522 : 5 : num_children = 15;
6523 : 5 : num_blocks = max_copy_blocks * num_children;
6524 : 5 : offset = 0;
6525 : 5 : src_offset = bdev->blockcnt - num_blocks;
6526 : :
6527 : 5 : g_io_done = false;
6528 [ + + ]: 80 : for (i = 0; i < num_children; i++) {
6529 : 75 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
6530 : : src_offset + offset, max_copy_blocks);
6531 : 75 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6532 : 75 : offset += max_copy_blocks;
6533 : : }
6534 : :
6535 : 5 : rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
6536 : 5 : CU_ASSERT_EQUAL(rc, 0);
6537 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
6538 : :
6539 [ + + ]: 15 : while (num_children > 0) {
6540 : 10 : num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);
6541 : 10 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
6542 : 10 : stub_complete_io(num_outstanding);
6543 : 10 : num_children -= num_outstanding;
6544 : : }
6545 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
6546 : :
6547 : : /* Case 4: Same test scenario as the case 2 but the configuration is different.
6548 : : * Copy is not supported.
6549 : : */
6550 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);
6551 : :
6552 : 5 : num_children = 2;
6553 : 5 : max_copy_blocks = spdk_bdev_get_max_copy(bdev);
6554 : 5 : num_blocks = max_copy_blocks * num_children;
6555 : 5 : src_offset = bdev->blockcnt - num_blocks;
6556 : 5 : offset = 0;
6557 : :
6558 : 5 : g_io_done = false;
6559 [ + + ]: 15 : for (i = 0; i < num_children; i++) {
6560 : 10 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset,
6561 : : max_copy_blocks, 0);
6562 : 10 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6563 : 10 : src_offset += max_copy_blocks;
6564 : : }
6565 [ + + ]: 15 : for (i = 0; i < num_children; i++) {
6566 : 10 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset,
6567 : : max_copy_blocks, 0);
6568 : 10 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6569 : 10 : offset += max_copy_blocks;
6570 : : }
6571 : :
6572 : 5 : src_offset = bdev->blockcnt - num_blocks;
6573 : 5 : offset = 0;
6574 : :
6575 : 5 : rc = spdk_bdev_copy_blocks(desc, ioch, offset, src_offset, num_blocks, io_done, NULL);
6576 : 5 : CU_ASSERT_EQUAL(rc, 0);
6577 [ - + ]: 5 : CU_ASSERT(g_io_done == false);
6578 : :
6579 [ + + ]: 10 : while (num_children > 0) {
6580 : 5 : num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);
6581 : :
6582 : : /* One copy request is split into one read and one write requests. */
6583 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
6584 : 5 : stub_complete_io(num_outstanding);
6585 : 5 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
6586 : 5 : stub_complete_io(num_outstanding);
6587 : :
6588 : 5 : num_children -= num_outstanding;
6589 : : }
6590 [ - + ]: 5 : CU_ASSERT(g_io_done == true);
6591 : :
6592 : 5 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);
6593 : :
6594 : 5 : spdk_put_io_channel(ioch);
6595 : 5 : spdk_bdev_close(desc);
6596 : 5 : free_bdev(bdev);
6597 : 5 : ut_fini_bdev();
6598 : 5 : }
6599 : :
6600 : : static void
6601 : 5 : examine_claim_v1(struct spdk_bdev *bdev)
6602 : : {
6603 : : int rc;
6604 : :
6605 : 5 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if);
6606 : 5 : CU_ASSERT(rc == 0);
6607 : 5 : }
6608 : :
6609 : : static void
6610 : 20 : examine_no_lock_held(struct spdk_bdev *bdev)
6611 : : {
6612 : 20 : CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock));
6613 : 20 : CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock));
6614 : 20 : }
6615 : :
6616 : : struct examine_claim_v2_ctx {
6617 : : struct ut_examine_ctx examine_ctx;
6618 : : enum spdk_bdev_claim_type claim_type;
6619 : : struct spdk_bdev_desc *desc;
6620 : : };
6621 : :
6622 : : static void
6623 : 5 : examine_claim_v2(struct spdk_bdev *bdev)
6624 : : {
6625 : 5 : struct examine_claim_v2_ctx *ctx = bdev->ctxt;
6626 : : int rc;
6627 : :
6628 : 5 : rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc);
6629 : 5 : CU_ASSERT(rc == 0);
6630 : :
6631 : 5 : rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if);
6632 : 5 : CU_ASSERT(rc == 0);
6633 : 5 : }
6634 : :
6635 : : static void
6636 : 5 : examine_locks(void)
6637 : : {
6638 : : struct spdk_bdev *bdev;
6639 : 5 : struct ut_examine_ctx ctx = { 0 };
6640 : 4 : struct examine_claim_v2_ctx v2_ctx;
6641 : :
6642 : : /* Without any claims, one code path is taken */
6643 : 5 : ctx.examine_config = examine_no_lock_held;
6644 : 5 : ctx.examine_disk = examine_no_lock_held;
6645 : 5 : bdev = allocate_bdev_ctx("bdev0", &ctx);
6646 : 5 : CU_ASSERT(ctx.examine_config_count == 1);
6647 : 5 : CU_ASSERT(ctx.examine_disk_count == 1);
6648 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6649 : 5 : CU_ASSERT(bdev->internal.claim.v1.module == NULL);
6650 : 5 : free_bdev(bdev);
6651 : :
6652 : : /* Exercise another path that is taken when examine_config() takes a v1 claim. */
6653 [ - + ]: 5 : memset(&ctx, 0, sizeof(ctx));
6654 : 5 : ctx.examine_config = examine_claim_v1;
6655 : 5 : ctx.examine_disk = examine_no_lock_held;
6656 : 5 : bdev = allocate_bdev_ctx("bdev0", &ctx);
6657 : 5 : CU_ASSERT(ctx.examine_config_count == 1);
6658 : 5 : CU_ASSERT(ctx.examine_disk_count == 1);
6659 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
6660 : 5 : CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if);
6661 : 5 : spdk_bdev_module_release_bdev(bdev);
6662 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6663 : 5 : CU_ASSERT(bdev->internal.claim.v1.module == NULL);
6664 : 5 : free_bdev(bdev);
6665 : :
6666 : : /* Exercise the final path that comes with v2 claims. */
6667 [ - + ]: 5 : memset(&v2_ctx, 0, sizeof(v2_ctx));
6668 : 5 : v2_ctx.examine_ctx.examine_config = examine_claim_v2;
6669 : 5 : v2_ctx.examine_ctx.examine_disk = examine_no_lock_held;
6670 : 5 : v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
6671 : 5 : bdev = allocate_bdev_ctx("bdev0", &v2_ctx);
6672 : 5 : CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1);
6673 : 5 : CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1);
6674 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
6675 : 5 : spdk_bdev_close(v2_ctx.desc);
6676 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6677 : 5 : free_bdev(bdev);
6678 : 5 : }
6679 : :
6680 : : #define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \
6681 : : do { \
6682 : : uint32_t len = 0; \
6683 : : struct spdk_bdev_module_claim *claim; \
6684 : : TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \
6685 : : len++; \
6686 : : } \
6687 : : CU_ASSERT(len == expect); \
6688 : : } while (0)
6689 : :
6690 : : static void
6691 : 5 : claim_v2_rwo(void)
6692 : : {
6693 : : struct spdk_bdev *bdev;
6694 : 4 : struct spdk_bdev_desc *desc;
6695 : 4 : struct spdk_bdev_desc *desc2;
6696 : 4 : struct spdk_bdev_claim_opts opts;
6697 : : int rc;
6698 : :
6699 : 5 : bdev = allocate_bdev("bdev0");
6700 : :
6701 : : /* Claim without options */
6702 : 5 : desc = NULL;
6703 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6704 : 5 : CU_ASSERT(rc == 0);
6705 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6706 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
6707 : : &bdev_ut_if);
6708 : 5 : CU_ASSERT(rc == 0);
6709 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
6710 : 5 : CU_ASSERT(desc->claim != NULL);
6711 : 5 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6712 : 5 : CU_ASSERT(strcmp(desc->claim->name, "") == 0);
6713 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6714 [ + + ]: 10 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6715 : :
6716 : : /* Release the claim by closing the descriptor */
6717 : 5 : spdk_bdev_close(desc);
6718 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6719 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
6720 [ - + ]: 5 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6721 : :
6722 : : /* Claim with options */
6723 : 5 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6724 : 5 : snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
6725 : 5 : desc = NULL;
6726 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6727 : 5 : CU_ASSERT(rc == 0);
6728 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6729 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
6730 : : &bdev_ut_if);
6731 : 5 : CU_ASSERT(rc == 0);
6732 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
6733 : 5 : CU_ASSERT(desc->claim != NULL);
6734 : 5 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6735 [ - + ]: 5 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6736 : 5 : memset(&opts, 0, sizeof(opts));
6737 [ - + ]: 5 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6738 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6739 [ + + ]: 10 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6740 : :
6741 : : /* The claim blocks new writers. */
6742 : 5 : desc2 = NULL;
6743 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
6744 : 5 : CU_ASSERT(rc == -EPERM);
6745 : 5 : CU_ASSERT(desc2 == NULL);
6746 : :
6747 : : /* New readers are allowed */
6748 : 5 : desc2 = NULL;
6749 : 5 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
6750 : 5 : CU_ASSERT(rc == 0);
6751 : 5 : CU_ASSERT(desc2 != NULL);
6752 [ - + ]: 5 : CU_ASSERT(!desc2->write);
6753 : :
6754 : : /* No new v2 RWO claims are allowed */
6755 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
6756 : : &bdev_ut_if);
6757 : 5 : CU_ASSERT(rc == -EPERM);
6758 : :
6759 : : /* No new v2 ROM claims are allowed */
6760 [ - + ]: 5 : CU_ASSERT(!desc2->write);
6761 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
6762 : : &bdev_ut_if);
6763 : 5 : CU_ASSERT(rc == -EPERM);
6764 [ - + ]: 5 : CU_ASSERT(!desc2->write);
6765 : :
6766 : : /* No new v2 RWM claims are allowed */
6767 : 5 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6768 : 5 : opts.shared_claim_key = (uint64_t)&opts;
6769 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
6770 : : &bdev_ut_if);
6771 : 5 : CU_ASSERT(rc == -EPERM);
6772 [ - + ]: 5 : CU_ASSERT(!desc2->write);
6773 : :
6774 : : /* No new v1 claims are allowed */
6775 : 5 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
6776 : 5 : CU_ASSERT(rc == -EPERM);
6777 : :
6778 : : /* None of the above changed the existing claim */
6779 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6780 [ + + ]: 10 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6781 : :
6782 : : /* Closing the first descriptor now allows a new claim and it is promoted to rw. */
6783 : 5 : spdk_bdev_close(desc);
6784 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6785 [ - + ]: 5 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6786 [ - + ]: 5 : CU_ASSERT(!desc2->write);
6787 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
6788 : : &bdev_ut_if);
6789 : 5 : CU_ASSERT(rc == 0);
6790 : 5 : CU_ASSERT(desc2->claim != NULL);
6791 [ - + ]: 5 : CU_ASSERT(desc2->write);
6792 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
6793 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
6794 [ + + ]: 10 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6795 : 5 : spdk_bdev_close(desc2);
6796 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6797 [ - + ]: 5 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6798 : :
6799 : : /* Cannot claim with a key */
6800 : 5 : desc = NULL;
6801 : 5 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
6802 : 5 : CU_ASSERT(rc == 0);
6803 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6804 : 5 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6805 : 5 : opts.shared_claim_key = (uint64_t)&opts;
6806 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
6807 : : &bdev_ut_if);
6808 : 5 : CU_ASSERT(rc == -EINVAL);
6809 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6810 [ - + ]: 5 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6811 : 5 : spdk_bdev_close(desc);
6812 : :
6813 : : /* Clean up */
6814 : 5 : free_bdev(bdev);
6815 : 5 : }
6816 : :
6817 : : static void
6818 : 5 : claim_v2_rom(void)
6819 : : {
6820 : : struct spdk_bdev *bdev;
6821 : 4 : struct spdk_bdev_desc *desc;
6822 : 4 : struct spdk_bdev_desc *desc2;
6823 : 4 : struct spdk_bdev_claim_opts opts;
6824 : : int rc;
6825 : :
6826 : 5 : bdev = allocate_bdev("bdev0");
6827 : :
6828 : : /* Claim without options */
6829 : 5 : desc = NULL;
6830 : 5 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
6831 : 5 : CU_ASSERT(rc == 0);
6832 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6833 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
6834 : : &bdev_ut_if);
6835 : 5 : CU_ASSERT(rc == 0);
6836 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
6837 : 5 : CU_ASSERT(desc->claim != NULL);
6838 : 5 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6839 : 5 : CU_ASSERT(strcmp(desc->claim->name, "") == 0);
6840 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6841 [ + + ]: 10 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6842 : :
6843 : : /* Release the claim by closing the descriptor */
6844 : 5 : spdk_bdev_close(desc);
6845 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6846 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
6847 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6848 [ - + ]: 5 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6849 : :
6850 : : /* Claim with options */
6851 : 5 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6852 : 5 : snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
6853 : 5 : desc = NULL;
6854 : 5 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
6855 : 5 : CU_ASSERT(rc == 0);
6856 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6857 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
6858 : : &bdev_ut_if);
6859 : 5 : CU_ASSERT(rc == 0);
6860 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
6861 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
6862 : 5 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6863 [ - + ]: 5 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6864 : 5 : memset(&opts, 0, sizeof(opts));
6865 [ - + ]: 5 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6866 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6867 [ + + ]: 10 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6868 : :
6869 : : /* The claim blocks new writers. */
6870 : 5 : desc2 = NULL;
6871 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
6872 : 5 : CU_ASSERT(rc == -EPERM);
6873 : 5 : CU_ASSERT(desc2 == NULL);
6874 : :
6875 : : /* New readers are allowed */
6876 : 5 : desc2 = NULL;
6877 : 5 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
6878 : 5 : CU_ASSERT(rc == 0);
6879 : 5 : CU_ASSERT(desc2 != NULL);
6880 [ - + ]: 5 : CU_ASSERT(!desc2->write);
6881 : :
6882 : : /* No new v2 RWO claims are allowed */
6883 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
6884 : : &bdev_ut_if);
6885 : 5 : CU_ASSERT(rc == -EPERM);
6886 : :
6887 : : /* No new v2 RWM claims are allowed */
6888 : 5 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6889 : 5 : opts.shared_claim_key = (uint64_t)&opts;
6890 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
6891 : : &bdev_ut_if);
6892 : 5 : CU_ASSERT(rc == -EPERM);
6893 [ - + ]: 5 : CU_ASSERT(!desc2->write);
6894 : :
6895 : : /* No new v1 claims are allowed */
6896 : 5 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
6897 : 5 : CU_ASSERT(rc == -EPERM);
6898 : :
6899 : : /* None of the above messed up the existing claim */
6900 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6901 [ + + ]: 10 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6902 : :
6903 : : /* New v2 ROM claims are allowed and the descriptor stays read-only. */
6904 [ - + ]: 5 : CU_ASSERT(!desc2->write);
6905 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
6906 : : &bdev_ut_if);
6907 : 5 : CU_ASSERT(rc == 0);
6908 [ - + ]: 5 : CU_ASSERT(!desc2->write);
6909 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6910 : 5 : CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
6911 [ + + ]: 15 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);
6912 : :
6913 : : /* Claim remains when closing the first descriptor */
6914 : 5 : spdk_bdev_close(desc);
6915 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
6916 : 5 : CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
6917 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
6918 [ + + ]: 10 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6919 : :
6920 : : /* Claim removed when closing the other descriptor */
6921 : 5 : spdk_bdev_close(desc2);
6922 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6923 [ - + ]: 5 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6924 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
6925 : :
6926 : : /* Cannot claim with a key */
6927 : 5 : desc = NULL;
6928 : 5 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
6929 : 5 : CU_ASSERT(rc == 0);
6930 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6931 : 5 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6932 : 5 : opts.shared_claim_key = (uint64_t)&opts;
6933 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
6934 : : &bdev_ut_if);
6935 : 5 : CU_ASSERT(rc == -EINVAL);
6936 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6937 [ - + ]: 5 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6938 : 5 : spdk_bdev_close(desc);
6939 : :
6940 : : /* Cannot claim with a read-write descriptor */
6941 : 5 : desc = NULL;
6942 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6943 : 5 : CU_ASSERT(rc == 0);
6944 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6945 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
6946 : : &bdev_ut_if);
6947 : 5 : CU_ASSERT(rc == -EINVAL);
6948 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6949 [ - + ]: 5 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6950 : 5 : spdk_bdev_close(desc);
6951 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
6952 : :
6953 : : /* Clean up */
6954 : 5 : free_bdev(bdev);
6955 : 5 : }
6956 : :
6957 : : static void
6958 : 5 : claim_v2_rwm(void)
6959 : : {
6960 : : struct spdk_bdev *bdev;
6961 : 4 : struct spdk_bdev_desc *desc;
6962 : 4 : struct spdk_bdev_desc *desc2;
6963 : 4 : struct spdk_bdev_claim_opts opts;
6964 : 4 : char good_key, bad_key;
6965 : : int rc;
6966 : :
6967 : 5 : bdev = allocate_bdev("bdev0");
6968 : :
6969 : : /* Claim without options should fail */
6970 : 5 : desc = NULL;
6971 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6972 : 5 : CU_ASSERT(rc == 0);
6973 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6974 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL,
6975 : : &bdev_ut_if);
6976 : 5 : CU_ASSERT(rc == -EINVAL);
6977 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6978 [ - + ]: 5 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6979 : 5 : CU_ASSERT(desc->claim == NULL);
6980 : :
6981 : : /* Claim with options */
6982 : 5 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6983 : 5 : snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
6984 : 5 : opts.shared_claim_key = (uint64_t)&good_key;
6985 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
6986 : : &bdev_ut_if);
6987 : 5 : CU_ASSERT(rc == 0);
6988 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
6989 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
6990 : 5 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6991 [ - + ]: 5 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6992 : 5 : memset(&opts, 0, sizeof(opts));
6993 [ - + ]: 5 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6994 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6995 [ + + ]: 10 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6996 : :
6997 : : /* The claim blocks new writers. */
6998 : 5 : desc2 = NULL;
6999 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
7000 : 5 : CU_ASSERT(rc == -EPERM);
7001 : 5 : CU_ASSERT(desc2 == NULL);
7002 : :
7003 : : /* New readers are allowed */
7004 : 5 : desc2 = NULL;
7005 : 5 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
7006 : 5 : CU_ASSERT(rc == 0);
7007 : 5 : CU_ASSERT(desc2 != NULL);
7008 [ - + ]: 5 : CU_ASSERT(!desc2->write);
7009 : :
7010 : : /* No new v2 RWO claims are allowed */
7011 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
7012 : : &bdev_ut_if);
7013 : 5 : CU_ASSERT(rc == -EPERM);
7014 : :
7015 : : /* No new v2 ROM claims are allowed and the descriptor stays read-only. */
7016 [ - + ]: 5 : CU_ASSERT(!desc2->write);
7017 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
7018 : : &bdev_ut_if);
7019 : 5 : CU_ASSERT(rc == -EPERM);
7020 [ - + ]: 5 : CU_ASSERT(!desc2->write);
7021 : :
7022 : : /* No new v1 claims are allowed */
7023 : 5 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7024 : 5 : CU_ASSERT(rc == -EPERM);
7025 : :
7026 : : /* No new v2 RWM claims are allowed if the key does not match */
7027 : 5 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7028 : 5 : opts.shared_claim_key = (uint64_t)&bad_key;
7029 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
7030 : : &bdev_ut_if);
7031 : 5 : CU_ASSERT(rc == -EPERM);
7032 [ - + ]: 5 : CU_ASSERT(!desc2->write);
7033 : :
7034 : : /* None of the above messed up the existing claim */
7035 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
7036 [ + + ]: 10 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
7037 : :
7038 : : /* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */
7039 : 5 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7040 : 5 : opts.shared_claim_key = (uint64_t)&good_key;
7041 [ - + ]: 5 : CU_ASSERT(!desc2->write);
7042 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
7043 : : &bdev_ut_if);
7044 : 5 : CU_ASSERT(rc == 0);
7045 [ - + ]: 5 : CU_ASSERT(desc2->write);
7046 : 5 : CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
7047 [ + + ]: 15 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);
7048 : :
7049 : : /* Claim remains when closing the first descriptor */
7050 : 5 : spdk_bdev_close(desc);
7051 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
7052 : 5 : CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
7053 : 5 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
7054 [ + + ]: 10 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
7055 : :
7056 : : /* Claim removed when closing the other descriptor */
7057 : 5 : spdk_bdev_close(desc2);
7058 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7059 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
7060 : :
7061 : : /* Cannot claim without a key */
7062 : 5 : desc = NULL;
7063 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
7064 : 5 : CU_ASSERT(rc == 0);
7065 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7066 : 5 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7067 : 5 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
7068 : : &bdev_ut_if);
7069 : 5 : CU_ASSERT(rc == -EINVAL);
7070 : 5 : spdk_bdev_close(desc);
7071 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7072 : 5 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
7073 : :
7074 : : /* Clean up */
7075 : 5 : free_bdev(bdev);
7076 : 5 : }
7077 : :
7078 : : static void
7079 : 5 : claim_v2_existing_writer(void)
7080 : : {
7081 : : struct spdk_bdev *bdev;
7082 : 4 : struct spdk_bdev_desc *desc;
7083 : 4 : struct spdk_bdev_desc *desc2;
7084 : 4 : struct spdk_bdev_claim_opts opts;
7085 : : enum spdk_bdev_claim_type type;
7086 : 5 : enum spdk_bdev_claim_type types[] = {
7087 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
7088 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
7089 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
7090 : : };
7091 : : size_t i;
7092 : : int rc;
7093 : :
7094 : 5 : bdev = allocate_bdev("bdev0");
7095 : :
7096 : 5 : desc = NULL;
7097 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
7098 : 5 : CU_ASSERT(rc == 0);
7099 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7100 : 5 : desc2 = NULL;
7101 : 5 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
7102 : 5 : CU_ASSERT(rc == 0);
7103 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc2 != NULL);
7104 : :
7105 [ + + ]: 20 : for (i = 0; i < SPDK_COUNTOF(types); i++) {
7106 : 15 : type = types[i];
7107 : 15 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7108 [ + + ]: 15 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
7109 : 5 : opts.shared_claim_key = (uint64_t)&opts;
7110 : : }
7111 : 15 : rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
7112 [ + + ]: 15 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
7113 : 5 : CU_ASSERT(rc == -EINVAL);
7114 : : } else {
7115 : 10 : CU_ASSERT(rc == -EPERM);
7116 : : }
7117 : 15 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7118 : 15 : rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if);
7119 [ + + ]: 15 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
7120 : 5 : CU_ASSERT(rc == -EINVAL);
7121 : : } else {
7122 : 10 : CU_ASSERT(rc == -EPERM);
7123 : : }
7124 : 15 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7125 : : }
7126 : :
7127 : 5 : spdk_bdev_close(desc);
7128 : 5 : spdk_bdev_close(desc2);
7129 : :
7130 : : /* Clean up */
7131 : 5 : free_bdev(bdev);
7132 : 5 : }
7133 : :
7134 : : static void
7135 : 5 : claim_v2_existing_v1(void)
7136 : : {
7137 : : struct spdk_bdev *bdev;
7138 : 4 : struct spdk_bdev_desc *desc;
7139 : 4 : struct spdk_bdev_claim_opts opts;
7140 : : enum spdk_bdev_claim_type type;
7141 : 5 : enum spdk_bdev_claim_type types[] = {
7142 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
7143 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
7144 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
7145 : : };
7146 : : size_t i;
7147 : : int rc;
7148 : :
7149 : 5 : bdev = allocate_bdev("bdev0");
7150 : :
7151 : 5 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7152 : 5 : CU_ASSERT(rc == 0);
7153 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
7154 : :
7155 : 5 : desc = NULL;
7156 : 5 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
7157 : 5 : CU_ASSERT(rc == 0);
7158 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7159 : :
7160 [ + + ]: 20 : for (i = 0; i < SPDK_COUNTOF(types); i++) {
7161 : 15 : type = types[i];
7162 : 15 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7163 [ + + ]: 15 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
7164 : 5 : opts.shared_claim_key = (uint64_t)&opts;
7165 : : }
7166 : 15 : rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
7167 : 15 : CU_ASSERT(rc == -EPERM);
7168 : 15 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
7169 : : }
7170 : :
7171 : 5 : spdk_bdev_module_release_bdev(bdev);
7172 : 5 : spdk_bdev_close(desc);
7173 : :
7174 : : /* Clean up */
7175 : 5 : free_bdev(bdev);
7176 : 5 : }
7177 : :
7178 : : static void
7179 : 5 : claim_v1_existing_v2(void)
7180 : : {
7181 : : struct spdk_bdev *bdev;
7182 : 4 : struct spdk_bdev_desc *desc;
7183 : 4 : struct spdk_bdev_claim_opts opts;
7184 : : enum spdk_bdev_claim_type type;
7185 : 5 : enum spdk_bdev_claim_type types[] = {
7186 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
7187 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
7188 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
7189 : : };
7190 : : size_t i;
7191 : : int rc;
7192 : :
7193 : 5 : bdev = allocate_bdev("bdev0");
7194 : :
7195 [ + + ]: 20 : for (i = 0; i < SPDK_COUNTOF(types); i++) {
7196 : 15 : type = types[i];
7197 : :
7198 : 15 : desc = NULL;
7199 : 15 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
7200 : 15 : CU_ASSERT(rc == 0);
7201 [ - + ]: 15 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7202 : :
7203 : : /* Get a v2 claim */
7204 : 15 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7205 [ + + ]: 15 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
7206 : 5 : opts.shared_claim_key = (uint64_t)&opts;
7207 : : }
7208 : 15 : rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
7209 : 15 : CU_ASSERT(rc == 0);
7210 : :
7211 : : /* Fail to get a v1 claim */
7212 : 15 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7213 : 15 : CU_ASSERT(rc == -EPERM);
7214 : :
7215 : 15 : spdk_bdev_close(desc);
7216 : :
7217 : : /* Now v1 succeeds */
7218 : 15 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7219 : 15 : CU_ASSERT(rc == 0)
7220 : 15 : spdk_bdev_module_release_bdev(bdev);
7221 : : }
7222 : :
7223 : : /* Clean up */
7224 : 5 : free_bdev(bdev);
7225 : 5 : }
7226 : :
7227 : : static int ut_examine_claimed_init0(void);
7228 : : static int ut_examine_claimed_init1(void);
7229 : : static void ut_examine_claimed_config0(struct spdk_bdev *bdev);
7230 : : static void ut_examine_claimed_disk0(struct spdk_bdev *bdev);
7231 : : static void ut_examine_claimed_config1(struct spdk_bdev *bdev);
7232 : : static void ut_examine_claimed_disk1(struct spdk_bdev *bdev);
7233 : :
7234 : : #define UT_MAX_EXAMINE_MODS 2
7235 : : struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = {
7236 : : {
7237 : : .name = "vbdev_ut_examine0",
7238 : : .module_init = ut_examine_claimed_init0,
7239 : : .module_fini = vbdev_ut_module_fini,
7240 : : .examine_config = ut_examine_claimed_config0,
7241 : : .examine_disk = ut_examine_claimed_disk0,
7242 : : },
7243 : : {
7244 : : .name = "vbdev_ut_examine1",
7245 : : .module_init = ut_examine_claimed_init1,
7246 : : .module_fini = vbdev_ut_module_fini,
7247 : : .examine_config = ut_examine_claimed_config1,
7248 : : .examine_disk = ut_examine_claimed_disk1,
7249 : : }
7250 : : };
7251 : :
7252 : 5 : SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0])
7253 : 5 : SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1])
7254 : :
7255 : : struct ut_examine_claimed_ctx {
7256 : : uint32_t examine_config_count;
7257 : : uint32_t examine_disk_count;
7258 : :
7259 : : /* Claim type to take, with these options */
7260 : : enum spdk_bdev_claim_type claim_type;
7261 : : struct spdk_bdev_claim_opts claim_opts;
7262 : :
7263 : : /* Expected return value from spdk_bdev_module_claim_bdev_desc() */
7264 : : int expect_claim_err;
7265 : :
7266 : : /* Descriptor used for a claim */
7267 : : struct spdk_bdev_desc *desc;
7268 : : } examine_claimed_ctx[UT_MAX_EXAMINE_MODS];
7269 : :
7270 : : bool ut_testing_examine_claimed;
7271 : :
7272 : : /*
7273 : : * Store the order in which the modules were initialized,
7274 : : * since we have no guarantee on the order of execution of the constructors.
7275 : : * Modules are examined in reverse order of their initialization.
7276 : : */
7277 : : static int g_ut_examine_claimed_order[UT_MAX_EXAMINE_MODS];
7278 : : static int
7279 : 400 : ut_examine_claimed_init(uint32_t modnum)
7280 : : {
7281 : : static int current = UT_MAX_EXAMINE_MODS;
7282 : :
7283 : : /* Only do this for the first initialization of the bdev framework */
7284 [ + + ]: 400 : if (current == 0) {
7285 : 390 : return 0;
7286 : : }
7287 : 10 : g_ut_examine_claimed_order[modnum] = --current;
7288 : :
7289 : 10 : return 0;
7290 : : }
7291 : :
7292 : : static int
7293 : 200 : ut_examine_claimed_init0(void)
7294 : : {
7295 : 200 : return ut_examine_claimed_init(0);
7296 : : }
7297 : :
7298 : : static int
7299 : 200 : ut_examine_claimed_init1(void)
7300 : : {
7301 : 200 : return ut_examine_claimed_init(1);
7302 : : }
7303 : :
7304 : : static void
7305 : 20 : reset_examine_claimed_ctx(void)
7306 : : {
7307 : : struct ut_examine_claimed_ctx *ctx;
7308 : : uint32_t i;
7309 : :
7310 [ + + ]: 60 : for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) {
7311 : 40 : ctx = &examine_claimed_ctx[i];
7312 [ + + ]: 40 : if (ctx->desc != NULL) {
7313 : 25 : spdk_bdev_close(ctx->desc);
7314 : : }
7315 [ - + ]: 40 : memset(ctx, 0, sizeof(*ctx));
7316 : 40 : spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts));
7317 : : }
7318 : 20 : }
7319 : :
7320 : : static void
7321 : 800 : examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum)
7322 : : {
7323 [ - + ]: 800 : SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
7324 : 800 : struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
7325 : 800 : struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
7326 : : int rc;
7327 : :
7328 [ + + + + ]: 800 : if (!ut_testing_examine_claimed) {
7329 : 770 : spdk_bdev_module_examine_done(module);
7330 : 770 : return;
7331 : : }
7332 : :
7333 : 30 : ctx->examine_config_count++;
7334 : :
7335 [ + + ]: 30 : if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) {
7336 : 25 : rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, &ctx->claim_opts,
7337 : : &ctx->desc);
7338 : 25 : CU_ASSERT(rc == 0);
7339 : :
7340 : 25 : rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, module);
7341 : 25 : CU_ASSERT(rc == ctx->expect_claim_err);
7342 : : }
7343 : 30 : spdk_bdev_module_examine_done(module);
7344 : : }
7345 : :
7346 : : static void
7347 : 400 : ut_examine_claimed_config0(struct spdk_bdev *bdev)
7348 : : {
7349 : 400 : examine_claimed_config(bdev, g_ut_examine_claimed_order[0]);
7350 : 400 : }
7351 : :
7352 : : static void
7353 : 400 : ut_examine_claimed_config1(struct spdk_bdev *bdev)
7354 : : {
7355 : 400 : examine_claimed_config(bdev, g_ut_examine_claimed_order[1]);
7356 : 400 : }
7357 : :
7358 : : static void
7359 : 770 : examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum)
7360 : : {
7361 [ - + ]: 770 : SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
7362 : 770 : struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
7363 : 770 : struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
7364 : :
7365 [ + + + + ]: 770 : if (!ut_testing_examine_claimed) {
7366 : 750 : spdk_bdev_module_examine_done(module);
7367 : 750 : return;
7368 : : }
7369 : :
7370 : 20 : ctx->examine_disk_count++;
7371 : :
7372 : 20 : spdk_bdev_module_examine_done(module);
7373 : : }
7374 : :
7375 : : static void
7376 : 385 : ut_examine_claimed_disk0(struct spdk_bdev *bdev)
7377 : : {
7378 : 385 : examine_claimed_disk(bdev, 0);
7379 : 385 : }
7380 : :
7381 : : static void
7382 : 385 : ut_examine_claimed_disk1(struct spdk_bdev *bdev)
7383 : : {
7384 : 385 : examine_claimed_disk(bdev, 1);
7385 : 385 : }
7386 : :
7387 : : static void
7388 : 5 : examine_claimed(void)
7389 : : {
7390 : : struct spdk_bdev *bdev;
7391 : 5 : struct spdk_bdev_module *mod = examine_claimed_mods;
7392 : 5 : struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx;
7393 : :
7394 : 5 : ut_testing_examine_claimed = true;
7395 : 5 : reset_examine_claimed_ctx();
7396 : :
7397 : : /*
7398 : : * With one module claiming, both modules' examine_config should be called, but only the
7399 : : * claiming module's examine_disk should be called.
7400 : : */
7401 : 5 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7402 : 5 : bdev = allocate_bdev("bdev0");
7403 : 5 : CU_ASSERT(ctx[0].examine_config_count == 1);
7404 : 5 : CU_ASSERT(ctx[0].examine_disk_count == 1);
7405 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
7406 : 5 : CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
7407 : 5 : CU_ASSERT(ctx[1].examine_config_count == 1);
7408 : 5 : CU_ASSERT(ctx[1].examine_disk_count == 0);
7409 : 5 : CU_ASSERT(ctx[1].desc == NULL);
7410 : 5 : reset_examine_claimed_ctx();
7411 : 5 : free_bdev(bdev);
7412 : :
7413 : : /*
7414 : : * With two modules claiming, both modules' examine_config and examine_disk should be
7415 : : * called.
7416 : : */
7417 : 5 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7418 : 5 : ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7419 : 5 : bdev = allocate_bdev("bdev0");
7420 : 5 : CU_ASSERT(ctx[0].examine_config_count == 1);
7421 : 5 : CU_ASSERT(ctx[0].examine_disk_count == 1);
7422 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
7423 : 5 : CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
7424 : 5 : CU_ASSERT(ctx[1].examine_config_count == 1);
7425 : 5 : CU_ASSERT(ctx[1].examine_disk_count == 1);
7426 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
7427 : 5 : CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
7428 : 5 : reset_examine_claimed_ctx();
7429 : 5 : free_bdev(bdev);
7430 : :
7431 : : /*
7432 : : * If two vbdev modules try to claim with conflicting claim types, the module that was added
7433 : : * last wins. The winner gets the claim and is the only one that has its examine_disk
7434 : : * callback invoked.
7435 : : */
7436 : 5 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7437 : 5 : ctx[0].expect_claim_err = -EPERM;
7438 : 5 : ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE;
7439 : 5 : bdev = allocate_bdev("bdev0");
7440 : 5 : CU_ASSERT(ctx[0].examine_config_count == 1);
7441 : 5 : CU_ASSERT(ctx[0].examine_disk_count == 0);
7442 : 5 : CU_ASSERT(ctx[1].examine_config_count == 1);
7443 : 5 : CU_ASSERT(ctx[1].examine_disk_count == 1);
7444 [ - + ]: 5 : SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
7445 : 5 : CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
7446 : 5 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
7447 : 5 : reset_examine_claimed_ctx();
7448 : 5 : free_bdev(bdev);
7449 : :
7450 : 5 : ut_testing_examine_claimed = false;
7451 : 5 : }
7452 : :
7453 : : int
7454 : 5 : main(int argc, char **argv)
7455 : : {
7456 : 5 : CU_pSuite suite = NULL;
7457 : : unsigned int num_failures;
7458 : :
7459 : 5 : CU_initialize_registry();
7460 : :
7461 : 5 : suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown);
7462 : :
7463 : 5 : CU_ADD_TEST(suite, bytes_to_blocks_test);
7464 : 5 : CU_ADD_TEST(suite, num_blocks_test);
7465 : 5 : CU_ADD_TEST(suite, io_valid_test);
7466 : 5 : CU_ADD_TEST(suite, open_write_test);
7467 : 5 : CU_ADD_TEST(suite, claim_test);
7468 : 5 : CU_ADD_TEST(suite, alias_add_del_test);
7469 : 5 : CU_ADD_TEST(suite, get_device_stat_test);
7470 : 5 : CU_ADD_TEST(suite, bdev_io_types_test);
7471 : 5 : CU_ADD_TEST(suite, bdev_io_wait_test);
7472 : 5 : CU_ADD_TEST(suite, bdev_io_spans_split_test);
7473 : 5 : CU_ADD_TEST(suite, bdev_io_boundary_split_test);
7474 : 5 : CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
7475 : 5 : CU_ADD_TEST(suite, bdev_io_mix_split_test);
7476 : 5 : CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
7477 : 5 : CU_ADD_TEST(suite, bdev_io_write_unit_split_test);
7478 : 5 : CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
7479 : 5 : CU_ADD_TEST(suite, bdev_io_alignment);
7480 : 5 : CU_ADD_TEST(suite, bdev_histograms);
7481 : 5 : CU_ADD_TEST(suite, bdev_write_zeroes);
7482 : 5 : CU_ADD_TEST(suite, bdev_compare_and_write);
7483 : 5 : CU_ADD_TEST(suite, bdev_compare);
7484 : 5 : CU_ADD_TEST(suite, bdev_compare_emulated);
7485 : 5 : CU_ADD_TEST(suite, bdev_zcopy_write);
7486 : 5 : CU_ADD_TEST(suite, bdev_zcopy_read);
7487 : 5 : CU_ADD_TEST(suite, bdev_open_while_hotremove);
7488 : 5 : CU_ADD_TEST(suite, bdev_close_while_hotremove);
7489 : 5 : CU_ADD_TEST(suite, bdev_open_ext_test);
7490 : 5 : CU_ADD_TEST(suite, bdev_open_ext_unregister);
7491 : 5 : CU_ADD_TEST(suite, bdev_set_io_timeout);
7492 : 5 : CU_ADD_TEST(suite, bdev_set_qd_sampling);
7493 : 5 : CU_ADD_TEST(suite, lba_range_overlap);
7494 : 5 : CU_ADD_TEST(suite, lock_lba_range_check_ranges);
7495 : 5 : CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
7496 : 5 : CU_ADD_TEST(suite, lock_lba_range_overlapped);
7497 : 5 : CU_ADD_TEST(suite, bdev_quiesce);
7498 : 5 : CU_ADD_TEST(suite, bdev_io_abort);
7499 : 5 : CU_ADD_TEST(suite, bdev_unmap);
7500 : 5 : CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
7501 : 5 : CU_ADD_TEST(suite, bdev_set_options_test);
7502 : 5 : CU_ADD_TEST(suite, bdev_get_memory_domains);
7503 : 5 : CU_ADD_TEST(suite, bdev_io_ext);
7504 : 5 : CU_ADD_TEST(suite, bdev_io_ext_no_opts);
7505 : 5 : CU_ADD_TEST(suite, bdev_io_ext_invalid_opts);
7506 : 5 : CU_ADD_TEST(suite, bdev_io_ext_split);
7507 : 5 : CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer);
7508 : 5 : CU_ADD_TEST(suite, bdev_register_uuid_alias);
7509 : 5 : CU_ADD_TEST(suite, bdev_unregister_by_name);
7510 : 5 : CU_ADD_TEST(suite, for_each_bdev_test);
7511 : 5 : CU_ADD_TEST(suite, bdev_seek_test);
7512 : 5 : CU_ADD_TEST(suite, bdev_copy);
7513 : 5 : CU_ADD_TEST(suite, bdev_copy_split_test);
7514 : 5 : CU_ADD_TEST(suite, examine_locks);
7515 : 5 : CU_ADD_TEST(suite, claim_v2_rwo);
7516 : 5 : CU_ADD_TEST(suite, claim_v2_rom);
7517 : 5 : CU_ADD_TEST(suite, claim_v2_rwm);
7518 : 5 : CU_ADD_TEST(suite, claim_v2_existing_writer);
7519 : 5 : CU_ADD_TEST(suite, claim_v2_existing_v1);
7520 : 5 : CU_ADD_TEST(suite, claim_v1_existing_v2);
7521 : 5 : CU_ADD_TEST(suite, examine_claimed);
7522 : :
7523 : 5 : allocate_cores(1);
7524 : 5 : allocate_threads(1);
7525 : 5 : set_thread(0);
7526 : :
7527 : 5 : num_failures = spdk_ut_run_tests(argc, argv, NULL);
7528 : 5 : CU_cleanup_registry();
7529 : :
7530 : 5 : free_threads();
7531 : 5 : free_cores();
7532 : :
7533 : 5 : return num_failures;
7534 : : }
|