/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * This is a simple example of a virtual block device module that passes IO
 * down to a bdev (or bdevs) that it's configured to attach to.
 */

#include "spdk/stdinc.h"

#include "vbdev_passthru.h"
#include "spdk/rpc.h"
#include "spdk/env.h"
#include "spdk/endian.h"
#include "spdk/string.h"
#include "spdk/thread.h"
#include "spdk/util.h"

#include "spdk/bdev_module.h"
#include "spdk/log.h"

/* This namespace UUID was generated using the uuid_generate() method. */
#define BDEV_PASSTHRU_NAMESPACE_UUID "7e25812e-c8c0-4d3f-8599-16d790555b85"
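/* When the user does not supply a UUID for a new passthru bdev, this namespace UUID
 * is combined with the base bdev's UUID via spdk_uuid_generate_sha1() to derive a
 * deterministic UUID for the vbdev (see vbdev_passthru_register() below).
 */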

static int vbdev_passthru_init(void);
static int vbdev_passthru_get_ctx_size(void);
static void vbdev_passthru_examine(struct spdk_bdev *bdev);
static void vbdev_passthru_finish(void);
static int vbdev_passthru_config_json(struct spdk_json_write_ctx *w);

static struct spdk_bdev_module passthru_if = {
	.name = "passthru",
	.module_init = vbdev_passthru_init,
	.get_ctx_size = vbdev_passthru_get_ctx_size,
	.examine_config = vbdev_passthru_examine,
	.module_fini = vbdev_passthru_finish,
	.config_json = vbdev_passthru_config_json
};

SPDK_BDEV_MODULE_REGISTER(passthru, &passthru_if)

/* List of pt_bdev names and their base bdevs via configuration file.
 * Used so we can parse the conf once at init and use this list in examine().
 */
struct bdev_names {
	char *vbdev_name;
	char *bdev_name;
	struct spdk_uuid uuid;
	TAILQ_ENTRY(bdev_names) link;
};
static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);

/* List of virtual bdevs and associated info for each. */
struct vbdev_passthru {
	struct spdk_bdev *base_bdev;		/* the thing we're attaching to */
	struct spdk_bdev_desc *base_desc;	/* its descriptor we get from open */
	struct spdk_bdev pt_bdev;		/* the PT virtual bdev */
	TAILQ_ENTRY(vbdev_passthru) link;
	struct spdk_thread *thread;		/* thread where base device is opened */
};
static TAILQ_HEAD(, vbdev_passthru) g_pt_nodes = TAILQ_HEAD_INITIALIZER(g_pt_nodes);

/* The pt vbdev channel struct. It is allocated and freed on our behalf by the io channel code.
 * If this vbdev needed to implement a poller or a queue for IO, this is where those things
 * would be defined. This passthru bdev doesn't actually need to allocate a channel; it could
 * simply pass back the channel of the bdev underneath it, but for example purposes we
 * present our own to the upper layers.
 */
struct pt_io_channel {
	struct spdk_io_channel *base_ch;	/* IO channel of base device */
};

/* Just for fun; this pt_bdev module doesn't need it, but this is essentially a per-IO
 * context that we get handed by the bdev layer.
 */
struct passthru_bdev_io {
	uint8_t test;

	/* bdev related */
	struct spdk_io_channel *ch;

	/* for bdev_io_wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
};

static void vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);


/* Callback for unregistering the IO device. */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_passthru *pt_node = io_device;

	/* Done with this pt_node. */
	free(pt_node->pt_bdev.name);
	free(pt_node);
}

/* Wrapper for the bdev close operation. */
static void
_vbdev_passthru_destruct(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

/* Called after we've unregistered following a hot remove callback.
 * Our finish entry point will be called next.
 */
static int
vbdev_passthru_destruct(void *ctx)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	/* It is important to follow this exact sequence of steps for destroying
	 * a vbdev...
	 */

	TAILQ_REMOVE(&g_pt_nodes, pt_node, link);

	/* Unclaim the underlying bdev. */
	spdk_bdev_module_release_bdev(pt_node->base_bdev);

	/* Close the underlying bdev on its same opened thread. */
	if (pt_node->thread && pt_node->thread != spdk_get_thread()) {
		spdk_thread_send_msg(pt_node->thread, _vbdev_passthru_destruct, pt_node->base_desc);
	} else {
		spdk_bdev_close(pt_node->base_desc);
	}

	/* Unregister the io_device. */
	spdk_io_device_unregister(pt_node, _device_unregister_cb);

	return 0;
}

/* Completion callback for IOs that were issued from this bdev. The original bdev_io
 * is passed in as an arg so we'll complete that one with the appropriate status
 * and then free the one that this module issued.
 */
static void
_pt_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set up this value in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

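/* Completion callback for a zcopy IO issued to the base bdev. In addition to completing
 * the original IO, propagate the buffer provided by the base bdev back to the original
 * bdev_io so the upper layer can access it.
 */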
static void
_pt_complete_zcopy_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set up this value in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_set_buf(orig_io, bdev_io->u.bdev.iovs[0].iov_base, bdev_io->u.bdev.iovs[0].iov_len);
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

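/* Callback used with spdk_bdev_queue_io_wait(); invoked once the base bdev has resources
 * available again, at which point we simply resubmit the original IO.
 */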
static void
vbdev_passthru_resubmit_io(void *arg)
{
	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;

	vbdev_passthru_submit_request(io_ctx->ch, bdev_io);
}

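/* Called when the base bdev returns -ENOMEM for a submission. Register a wait entry on the
 * base bdev's channel so that vbdev_passthru_resubmit_io() is called once resources free up.
 */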
static void
vbdev_passthru_queue_io(struct spdk_bdev_io *bdev_io)
{
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(io_ctx->ch);
	int rc;

	io_ctx->bdev_io_wait.bdev = bdev_io->bdev;
	io_ctx->bdev_io_wait.cb_fn = vbdev_passthru_resubmit_io;
	io_ctx->bdev_io_wait.cb_arg = bdev_io;

	/* Queue the IO using the channel of the base device. */
	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, pt_ch->base_ch, &io_ctx->bdev_io_wait);
	if (rc != 0) {
		SPDK_ERRLOG("Queue io failed in vbdev_passthru_queue_io, rc=%d.\n", rc);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

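/* Populate the extended IO options from the original bdev_io so that the caller's memory
 * domain and metadata buffer are passed through unchanged to the base bdev.
 */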
static void
pt_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
	memset(opts, 0, sizeof(*opts));
	opts->size = sizeof(*opts);
	opts->memory_domain = bdev_io->u.bdev.memory_domain;
	opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
	opts->metadata = bdev_io->u.bdev.md_buf;
}

/* Callback for getting a buf from the bdev pool in the event that the caller passed
 * in NULL. We need to own the buffer so it doesn't get freed by another vbdev module
 * beneath us before we're done with it. That won't happen in this example, but it could
 * if this example were used as a template for something more complex.
 */
static void
pt_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru,
					 pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct spdk_bdev_ext_io_opts io_opts;
	int rc;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	pt_init_ext_io_opts(bdev_io, &io_opts);
	rc = spdk_bdev_readv_blocks_ext(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
					bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
					bdev_io->u.bdev.num_blocks, _pt_complete_io,
					bdev_io, &io_opts);
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* Called when someone above submits IO to this pt vbdev. We're simply passing it on here
 * via SPDK IO calls which in turn allocate another bdev IO and call our completion callback
 * provided below along with the original bdev_io so that we can complete it once this IO
 * completes.
 */
static void
vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru, pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct spdk_bdev_ext_io_opts io_opts;
	int rc = 0;

	/* Set up a per-IO context value; we don't do anything with it in the vbdev other
	 * than confirm we get the same thing back in the completion callback just to
	 * demonstrate.
	 */
	io_ctx->test = 0x5a;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, pt_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		pt_init_ext_io_opts(bdev_io, &io_opts);
		rc = spdk_bdev_writev_blocks_ext(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
						 bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
						 bdev_io->u.bdev.num_blocks, _pt_complete_io,
						 bdev_io, &io_opts);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = spdk_bdev_write_zeroes_blocks(pt_node->base_desc, pt_ch->base_ch,
						   bdev_io->u.bdev.offset_blocks,
						   bdev_io->u.bdev.num_blocks,
						   _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(pt_node->base_desc, pt_ch->base_ch,
				     _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		rc = spdk_bdev_zcopy_start(pt_node->base_desc, pt_ch->base_ch, NULL, 0,
					   bdev_io->u.bdev.offset_blocks,
					   bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
					   _pt_complete_zcopy_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		rc = spdk_bdev_abort(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.abort.bio_to_abort,
				     _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_COPY:
		rc = spdk_bdev_copy_blocks(pt_node->base_desc, pt_ch->base_ch,
					   bdev_io->u.bdev.offset_blocks,
					   bdev_io->u.bdev.copy.src_offset_blocks,
					   bdev_io->u.bdev.num_blocks,
					   _pt_complete_io, bdev_io);
		break;
	default:
		SPDK_ERRLOG("passthru: unknown I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* We'll just call the base bdev and let it answer; however, if we were more (or less)
 * restrictive for some reason, we could get the response back and modify it according
 * to our purposes.
 */
static bool
vbdev_passthru_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	return spdk_bdev_io_type_supported(pt_node->base_bdev, io_type);
}

/* We supplied this as an entry point for upper layers that want to communicate with this
 * bdev. This is how they get a channel. We are passed the same context we provided when
 * we created our PT vbdev in examine() which, for this bdev, is the address of one of
 * our context nodes. From here we'll ask the SPDK channel code to fill out our channel
 * struct and we'll keep it in our PT node.
 */
static struct spdk_io_channel *
vbdev_passthru_get_io_channel(void *ctx)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;
	struct spdk_io_channel *pt_ch = NULL;

	/* The IO channel code will allocate a channel for us which consists of
	 * the SPDK channel structure plus the size of our pt_io_channel struct
	 * that we passed in when we registered our IO device. It will then call
	 * our channel create callback to populate any elements that we need to
	 * update.
	 */
	pt_ch = spdk_get_io_channel(pt_node);

	return pt_ch;
}

/* This is the output for bdev_get_bdevs() for this vbdev. */
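/* For illustration, the "driver_specific" data produced here looks roughly like
 * (bdev names are hypothetical):
 *   "passthru": { "name": "TestPT0", "base_bdev_name": "Malloc0" }
 */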
static int
vbdev_passthru_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	spdk_json_write_name(w, "passthru");
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
	spdk_json_write_object_end(w);

	return 0;
}

/* This is used to generate JSON that can configure this module to its current state. */
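/* Each passthru bdev is emitted as a create call, roughly of the form below
 * (values are hypothetical; "uuid" is written only when it is non-null):
 *   { "method": "bdev_passthru_create",
 *     "params": { "base_bdev_name": "Malloc0", "name": "TestPT0", "uuid": "..." } }
 */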
static int
vbdev_passthru_config_json(struct spdk_json_write_ctx *w)
{
	struct vbdev_passthru *pt_node;

	TAILQ_FOREACH(pt_node, &g_pt_nodes, link) {
		const struct spdk_uuid *uuid = spdk_bdev_get_uuid(&pt_node->pt_bdev);

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "bdev_passthru_create");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
		spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
		if (!spdk_uuid_is_null(uuid)) {
			spdk_json_write_named_uuid(w, "uuid", uuid);
		}
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
	return 0;
}

/* We provide this callback for the SPDK channel code to create a channel using
 * the channel struct we provided in our module get_io_channel() entry point. Here
 * we get and save off an underlying base channel of the device below us so that
 * we can communicate with the base bdev on a per-channel basis. If we needed
 * our own poller for this vbdev, we'd register it here.
 */
static int
pt_bdev_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct pt_io_channel *pt_ch = ctx_buf;
	struct vbdev_passthru *pt_node = io_device;

	pt_ch->base_ch = spdk_bdev_get_io_channel(pt_node->base_desc);

	return 0;
}

/* We provide this callback for the SPDK channel code to destroy a channel
 * created with our create callback. We just need to undo anything we did
 * when we created it. If this bdev used its own poller, we'd unregister it here.
 */
static void
pt_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct pt_io_channel *pt_ch = ctx_buf;

	spdk_put_io_channel(pt_ch->base_ch);
}

/* Create the passthru association from the bdev and vbdev name and insert
 * it on the global list.
 */
static int
vbdev_passthru_insert_name(const char *bdev_name, const char *vbdev_name,
			   const struct spdk_uuid *uuid)
{
	struct bdev_names *name;

	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(vbdev_name, name->vbdev_name) == 0) {
			SPDK_ERRLOG("passthru bdev %s already exists\n", vbdev_name);
			return -EEXIST;
		}
	}

	name = calloc(1, sizeof(struct bdev_names));
	if (!name) {
		SPDK_ERRLOG("could not allocate bdev_names\n");
		return -ENOMEM;
	}

	name->bdev_name = strdup(bdev_name);
	if (!name->bdev_name) {
		SPDK_ERRLOG("could not allocate name->bdev_name\n");
		free(name);
		return -ENOMEM;
	}

	name->vbdev_name = strdup(vbdev_name);
	if (!name->vbdev_name) {
		SPDK_ERRLOG("could not allocate name->vbdev_name\n");
		free(name->bdev_name);
		free(name);
		return -ENOMEM;
	}

	spdk_uuid_copy(&name->uuid, uuid);
	TAILQ_INSERT_TAIL(&g_bdev_names, name, link);

	return 0;
}

/* On init, just perform bdev module specific initialization. */
static int
vbdev_passthru_init(void)
{
	return 0;
}

/* Called when the entire module is being torn down. */
static void
vbdev_passthru_finish(void)
{
	struct bdev_names *name;

	while ((name = TAILQ_FIRST(&g_bdev_names))) {
		TAILQ_REMOVE(&g_bdev_names, name, link);
		free(name->bdev_name);
		free(name->vbdev_name);
		free(name);
	}
}

/* During init we'll be asked how much memory we'd like passed to us
 * in bdev_io structures as context. Here's where we specify how
 * much context we want per IO.
 */
static int
vbdev_passthru_get_ctx_size(void)
{
	return sizeof(struct passthru_bdev_io);
}

/* Whereas vbdev_passthru_config_json() is used to generate per-module JSON config data,
 * this function is called to output any per-bdev specific methods. For the PT module,
 * there are none.
 */
static void
vbdev_passthru_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* No config per bdev needed */
}

static int
vbdev_passthru_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	/* The passthru bdev doesn't touch the data buffers itself, so it supports any
	 * memory domain used by the base_bdev.
	 */
	return spdk_bdev_get_memory_domains(pt_node->base_bdev, domains, array_size);
}

/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_passthru_fn_table = {
	.destruct = vbdev_passthru_destruct,
	.submit_request = vbdev_passthru_submit_request,
	.io_type_supported = vbdev_passthru_io_type_supported,
	.get_io_channel = vbdev_passthru_get_io_channel,
	.dump_info_json = vbdev_passthru_dump_info_json,
	.write_config_json = vbdev_passthru_write_config_json,
	.get_memory_domains = vbdev_passthru_get_memory_domains,
};

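/* Unregister every passthru vbdev whose base bdev matches the one being hot-removed. */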
static void
vbdev_passthru_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
{
	struct vbdev_passthru *pt_node, *tmp;

	TAILQ_FOREACH_SAFE(pt_node, &g_pt_nodes, link, tmp) {
		if (bdev_find == pt_node->base_bdev) {
			spdk_bdev_unregister(&pt_node->pt_bdev, NULL, NULL);
		}
	}
}

/* Called when the underlying base bdev triggers an asynchronous event, such as bdev removal. */
static void
vbdev_passthru_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				  void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		vbdev_passthru_base_bdev_hotremove_cb(bdev);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* Create and register the passthru vbdev if we find it in our list of bdev names.
 * This can be called either from the examine path or from the RPC method.
 */
static int
vbdev_passthru_register(const char *bdev_name)
{
	struct bdev_names *name;
	struct vbdev_passthru *pt_node;
	struct spdk_bdev *bdev;
	struct spdk_uuid ns_uuid;
	int rc = 0;

	spdk_uuid_parse(&ns_uuid, BDEV_PASSTHRU_NAMESPACE_UUID);

	/* Check our list of names from config versus this bdev and, if
	 * there's a match, create the pt_node & bdev accordingly.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->bdev_name, bdev_name) != 0) {
			continue;
		}

		SPDK_NOTICELOG("Match on %s\n", bdev_name);
		pt_node = calloc(1, sizeof(struct vbdev_passthru));
		if (!pt_node) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_node\n");
			break;
		}

		pt_node->pt_bdev.name = strdup(name->vbdev_name);
		if (!pt_node->pt_bdev.name) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_bdev name\n");
			free(pt_node);
			break;
		}
		pt_node->pt_bdev.product_name = "passthru";

		/* The base bdev that we're attaching to. */
		rc = spdk_bdev_open_ext(bdev_name, true, vbdev_passthru_base_bdev_event_cb,
					NULL, &pt_node->base_desc);
		if (rc) {
			if (rc != -ENODEV) {
				SPDK_ERRLOG("could not open bdev %s\n", bdev_name);
			}
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("base bdev opened\n");

		bdev = spdk_bdev_desc_get_bdev(pt_node->base_desc);
		pt_node->base_bdev = bdev;

		if (!spdk_uuid_is_null(&name->uuid)) {
			/* Use the configured UUID. */
			spdk_uuid_copy(&pt_node->pt_bdev.uuid, &name->uuid);
		} else {
			/* Generate a UUID based on the namespace UUID + base bdev UUID. */
			rc = spdk_uuid_generate_sha1(&pt_node->pt_bdev.uuid, &ns_uuid,
						     (const char *)&pt_node->base_bdev->uuid, sizeof(struct spdk_uuid));
			if (rc) {
				SPDK_ERRLOG("Unable to generate new UUID for passthru bdev\n");
				spdk_bdev_close(pt_node->base_desc);
				free(pt_node->pt_bdev.name);
				free(pt_node);
				break;
			}
		}

		/* Copy some properties from the underlying base bdev. */
		pt_node->pt_bdev.write_cache = bdev->write_cache;
		pt_node->pt_bdev.required_alignment = bdev->required_alignment;
		pt_node->pt_bdev.optimal_io_boundary = bdev->optimal_io_boundary;
		pt_node->pt_bdev.blocklen = bdev->blocklen;
		pt_node->pt_bdev.blockcnt = bdev->blockcnt;

		pt_node->pt_bdev.md_interleave = bdev->md_interleave;
		pt_node->pt_bdev.md_len = bdev->md_len;
		pt_node->pt_bdev.dif_type = bdev->dif_type;
		pt_node->pt_bdev.dif_is_head_of_md = bdev->dif_is_head_of_md;
		pt_node->pt_bdev.dif_check_flags = bdev->dif_check_flags;
		pt_node->pt_bdev.dif_pi_format = bdev->dif_pi_format;

		/* This is the context that is passed to us when the bdev
		 * layer calls in, so we'll save our pt_bdev node here.
		 */
		pt_node->pt_bdev.ctxt = pt_node;
		pt_node->pt_bdev.fn_table = &vbdev_passthru_fn_table;
		pt_node->pt_bdev.module = &passthru_if;
		TAILQ_INSERT_TAIL(&g_pt_nodes, pt_node, link);

		spdk_io_device_register(pt_node, pt_bdev_ch_create_cb, pt_bdev_ch_destroy_cb,
					sizeof(struct pt_io_channel),
					name->vbdev_name);
		SPDK_NOTICELOG("io_device created at: 0x%p\n", pt_node);

		/* Save the thread where the base device is opened. */
		pt_node->thread = spdk_get_thread();

		rc = spdk_bdev_module_claim_bdev(bdev, pt_node->base_desc, pt_node->pt_bdev.module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", bdev_name);
			spdk_bdev_close(pt_node->base_desc);
			TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
			spdk_io_device_unregister(pt_node, NULL);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("bdev claimed\n");

		rc = spdk_bdev_register(&pt_node->pt_bdev);
		if (rc) {
			SPDK_ERRLOG("could not register pt_bdev\n");
			spdk_bdev_module_release_bdev(pt_node->base_bdev);
			spdk_bdev_close(pt_node->base_desc);
			TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
			spdk_io_device_unregister(pt_node, NULL);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("pt_bdev registered\n");
		SPDK_NOTICELOG("created pt_bdev for: %s\n", name->vbdev_name);
	}

	return rc;
}

/* Create the passthru disk from the given bdev and vbdev name. */
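/* This is typically reached through the bdev_passthru_create RPC; the request is shaped
 * like the config_json output above, e.g. (bdev names are hypothetical):
 *   { "method": "bdev_passthru_create",
 *     "params": { "base_bdev_name": "Malloc0", "name": "TestPT0" } }
 */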
int
bdev_passthru_create_disk(const char *bdev_name, const char *vbdev_name,
			  const struct spdk_uuid *uuid)
{
	int rc;

	/* Insert the bdev name into our global name list even if it doesn't exist yet;
	 * it may show up soon...
	 */
	rc = vbdev_passthru_insert_name(bdev_name, vbdev_name, uuid);
	if (rc) {
		return rc;
	}

	rc = vbdev_passthru_register(bdev_name);
	if (rc == -ENODEV) {
		/* This is not an error; we tracked the name above and it still
		 * may show up later.
		 */
		SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
		rc = 0;
	}

	return rc;
}

void
bdev_passthru_delete_disk(const char *bdev_name, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	struct bdev_names *name;
	int rc;

	/* Some cleanup happens in the destruct callback. */
	rc = spdk_bdev_unregister_by_name(bdev_name, &passthru_if, cb_fn, cb_arg);
	if (rc == 0) {
		/* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the
		 * vbdev does not get re-created if the same bdev is constructed at some other time,
		 * unless the underlying bdev was hot-removed.
		 */
		TAILQ_FOREACH(name, &g_bdev_names, link) {
			if (strcmp(name->vbdev_name, bdev_name) == 0) {
				TAILQ_REMOVE(&g_bdev_names, name, link);
				free(name->bdev_name);
				free(name->vbdev_name);
				free(name);
				break;
			}
		}
	} else {
		cb_fn(cb_arg, rc);
	}
}

/* Because we registered this function as our module's examine_config callback, we'll
 * get this call anytime a new bdev shows up. Here we need to decide if we care about
 * it and, if so, what to do. We parsed the config file at init, so we check the new
 * bdev against the list we built up at that time and, if the user configured us to
 * attach to this bdev, here's where we do it.
 */
static void
vbdev_passthru_examine(struct spdk_bdev *bdev)
{
	vbdev_passthru_register(bdev->name);

	spdk_bdev_module_examine_done(&passthru_if);
}

SPDK_LOG_REGISTER_COMPONENT(vbdev_passthru)