Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2018 Intel Corporation.
3 : * All rights reserved.
4 : * Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES.
5 : * All rights reserved.
6 : */
7 :
8 : #include "vbdev_crypto.h"
9 :
10 : #include "spdk_internal/assert.h"
11 : #include "spdk/thread.h"
12 : #include "spdk/bdev_module.h"
13 : #include "spdk/likely.h"
14 :
15 : /* This namespace UUID was generated using uuid_generate() method. */
16 : #define BDEV_CRYPTO_NAMESPACE_UUID "078e3cf7-f4b4-4545-b2c3-d40045a64ae2"
17 :
/* Pairs a set of creation options with a list hook; one entry per configured
 * crypto vbdev name, created via create_crypto_disk(). */
struct bdev_names {
	struct vbdev_crypto_opts *opts;		/* creation parameters; owned by this entry */
	TAILQ_ENTRY(bdev_names) link;		/* hook into g_bdev_names */
};

/* List of crypto_bdev names and their base bdevs via configuration file. */
static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);

/* Per-instance state for one crypto virtual bdev layered on a base bdev. */
struct vbdev_crypto {
	struct spdk_bdev *base_bdev;		/* the thing we're attaching to */
	struct spdk_bdev_desc *base_desc;	/* its descriptor we get from open */
	struct spdk_bdev crypto_bdev;		/* the crypto virtual bdev */
	struct vbdev_crypto_opts *opts;		/* crypto options such as names and DEK */
	TAILQ_ENTRY(vbdev_crypto) link;		/* hook into g_vbdev_crypto */
	struct spdk_thread *thread;		/* thread where base device is opened */
};

/* List of virtual bdevs and associated info for each. We keep the device friendly name here even
 * though its also in the device struct because we use it early on.
 */
static TAILQ_HEAD(, vbdev_crypto) g_vbdev_crypto = TAILQ_HEAD_INITIALIZER(g_vbdev_crypto);

/* The crypto vbdev channel struct. It is allocated and freed on my behalf by the io channel code.
 * We store things in here that are needed on per thread basis like the base_channel for this thread.
 */
struct crypto_io_channel {
	struct spdk_io_channel *base_ch;	/* IO channel of base device */
	struct spdk_io_channel *accel_channel;	/* Accel engine channel used for crypto ops */
	struct spdk_accel_crypto_key *crypto_key;	/* DEK shared by all IO on this channel */
};

/* Stage an IO resumes from after waiting for base bdev resources (-ENOMEM). */
enum crypto_io_resubmit_state {
	CRYPTO_IO_DECRYPT_DONE,	/* Appended decrypt, need to read */
	CRYPTO_IO_ENCRYPT_DONE,	/* Need to write */
};

/* This is the crypto per IO context that the bdev layer allocates for us opaquely and attaches to
 * each IO for us.
 */
struct crypto_bdev_io {
	struct crypto_io_channel *crypto_ch;	/* need to store for crypto completion handling */
	struct vbdev_crypto *crypto_bdev;	/* the crypto node struct associated with this IO */
	/* Used for the single contiguous buffer that serves as the crypto destination target for writes */
	uint64_t aux_num_blocks;		/* num of blocks for the contiguous buffer */
	uint64_t aux_offset_blocks;		/* block offset on media */
	void *aux_buf_raw;			/* raw buffer that the bdev layer gave us for write buffer */
	struct iovec aux_buf_iov;		/* iov representing aligned contig write buffer */
	struct spdk_memory_domain *aux_domain;	/* memory domain of the aux buf */
	void *aux_domain_ctx;			/* memory domain ctx of the aux buf */
	struct spdk_accel_sequence *seq;	/* sequence of accel operations */

	/* for bdev_io_wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
	enum crypto_io_resubmit_state resubmit_state;
};
73 :
74 : static void vbdev_crypto_queue_io(struct spdk_bdev_io *bdev_io,
75 : enum crypto_io_resubmit_state state);
76 : static void _complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
77 : static void vbdev_crypto_examine(struct spdk_bdev *bdev);
78 : static int vbdev_crypto_claim(const char *bdev_name);
79 : static void vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);
80 :
/* Fail an IO that has NOT yet been submitted to the base bdev: return the aux
 * buffer (if any), abort any accel operations appended so far, and complete
 * the original IO with FAILED status.
 */
static void
crypto_io_fail(struct crypto_bdev_io *crypto_io)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(crypto_io);
	struct crypto_io_channel *crypto_ch = crypto_io->crypto_ch;

	/* Writes hold an aux encryption buffer from accel; give it back. */
	if (crypto_io->aux_buf_raw) {
		spdk_accel_put_buf(crypto_ch->accel_channel, crypto_io->aux_buf_raw,
				   crypto_io->aux_domain, crypto_io->aux_domain_ctx);
	}

	/* This function can only be used to fail an IO that hasn't been sent to the base bdev,
	 * otherwise accel sequence might have already been executed/aborted. */
	spdk_accel_sequence_abort(crypto_io->seq);
	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
}
97 :
98 : static void
99 6 : crypto_write(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io)
100 : {
101 6 : struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
102 : crypto_bdev);
103 6 : struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
104 6 : struct spdk_bdev_ext_io_opts opts = {};
105 : int rc;
106 :
107 6 : opts.size = sizeof(opts);
108 6 : opts.accel_sequence = crypto_io->seq;
109 6 : opts.memory_domain = crypto_io->aux_domain;
110 6 : opts.memory_domain_ctx = crypto_io->aux_domain_ctx;
111 :
112 : /* Write the encrypted data. */
113 6 : rc = spdk_bdev_writev_blocks_ext(crypto_bdev->base_desc, crypto_ch->base_ch,
114 : &crypto_io->aux_buf_iov, 1, crypto_io->aux_offset_blocks,
115 : crypto_io->aux_num_blocks, _complete_internal_io,
116 : bdev_io, &opts);
117 6 : if (spdk_unlikely(rc != 0)) {
118 3 : if (rc == -ENOMEM) {
119 1 : SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
120 1 : vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_ENCRYPT_DONE);
121 : } else {
122 2 : SPDK_ERRLOG("Failed to submit bdev_io!\n");
123 2 : crypto_io_fail(crypto_io);
124 : }
125 : }
126 6 : }
127 :
128 : /* We're either encrypting on the way down or decrypting on the way back. */
129 : static void
130 6 : crypto_encrypt(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io)
131 : {
132 6 : struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
133 6 : uint32_t blocklen = crypto_io->crypto_bdev->crypto_bdev.blocklen;
134 : uint64_t total_length;
135 : uint64_t alignment;
136 6 : void *aux_buf = crypto_io->aux_buf_raw;
137 : int rc;
138 :
139 : /* For encryption, we need to prepare a single contiguous buffer as the encryption
140 : * destination, we'll then pass that along for the write after encryption is done.
141 : * This is done to avoiding encrypting the provided write buffer which may be
142 : * undesirable in some use cases.
143 : */
144 6 : total_length = bdev_io->u.bdev.num_blocks * blocklen;
145 6 : alignment = spdk_bdev_get_buf_align(&crypto_io->crypto_bdev->crypto_bdev);
146 6 : crypto_io->aux_buf_iov.iov_len = total_length;
147 6 : crypto_io->aux_buf_iov.iov_base = (void *)(((uintptr_t)aux_buf + (alignment - 1)) & ~
148 : (alignment - 1));
149 6 : crypto_io->aux_offset_blocks = bdev_io->u.bdev.offset_blocks;
150 6 : crypto_io->aux_num_blocks = bdev_io->u.bdev.num_blocks;
151 :
152 12 : rc = spdk_accel_append_encrypt(&crypto_io->seq, crypto_ch->accel_channel,
153 : crypto_ch->crypto_key, &crypto_io->aux_buf_iov, 1,
154 : crypto_io->aux_domain, crypto_io->aux_domain_ctx,
155 6 : bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
156 : bdev_io->u.bdev.memory_domain,
157 : bdev_io->u.bdev.memory_domain_ctx,
158 : bdev_io->u.bdev.offset_blocks, blocklen,
159 : NULL, NULL);
160 6 : if (spdk_unlikely(rc != 0)) {
161 2 : spdk_accel_put_buf(crypto_ch->accel_channel, crypto_io->aux_buf_raw,
162 : crypto_io->aux_domain, crypto_io->aux_domain_ctx);
163 2 : if (rc == -ENOMEM) {
164 1 : SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
165 1 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
166 : } else {
167 1 : SPDK_ERRLOG("Failed to submit bdev_io!\n");
168 1 : crypto_io_fail(crypto_io);
169 : }
170 :
171 2 : return;
172 : }
173 :
174 4 : crypto_write(crypto_ch, bdev_io);
175 : }
176 :
/* Completion callback for every IO we submit to the base bdev on behalf of an
 * original upper-layer IO (passed as cb_arg). Releases the aux write buffer
 * if one was taken, propagates the base IO's status to the original IO, and
 * frees the base bdev_io.
 */
static void
_complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)orig_io->driver_ctx;
	struct crypto_io_channel *crypto_ch = crypto_io->crypto_ch;

	/* Only writes allocate an aux buffer; reads and passthrough ops leave it NULL. */
	if (crypto_io->aux_buf_raw) {
		spdk_accel_put_buf(crypto_ch->accel_channel, crypto_io->aux_buf_raw,
				   crypto_io->aux_domain, crypto_io->aux_domain_ctx);
	}

	spdk_bdev_io_complete_base_io_status(orig_io, bdev_io);
	spdk_bdev_free_io(bdev_io);
}
192 :
193 : static void crypto_read(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io);
194 :
195 : static void
196 0 : vbdev_crypto_resubmit_io(void *arg)
197 : {
198 0 : struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
199 0 : struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
200 :
201 0 : switch (crypto_io->resubmit_state) {
202 0 : case CRYPTO_IO_ENCRYPT_DONE:
203 0 : crypto_write(crypto_io->crypto_ch, bdev_io);
204 0 : break;
205 0 : case CRYPTO_IO_DECRYPT_DONE:
206 0 : crypto_read(crypto_io->crypto_ch, bdev_io);
207 0 : break;
208 0 : default:
209 0 : SPDK_UNREACHABLE();
210 : }
211 0 : }
212 :
/* Park an IO on the base bdev's wait queue after an -ENOMEM submission
 * failure; vbdev_crypto_resubmit_io() resumes it from @state once resources
 * are available. If even queueing fails, the IO is failed outright.
 */
static void
vbdev_crypto_queue_io(struct spdk_bdev_io *bdev_io, enum crypto_io_resubmit_state state)
{
	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc;

	crypto_io->bdev_io_wait.bdev = bdev_io->bdev;
	crypto_io->bdev_io_wait.cb_fn = vbdev_crypto_resubmit_io;
	crypto_io->bdev_io_wait.cb_arg = bdev_io;
	/* Remember which stage to resume from when the wait entry fires. */
	crypto_io->resubmit_state = state;

	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, crypto_io->crypto_ch->base_ch,
				     &crypto_io->bdev_io_wait);
	if (rc != 0) {
		SPDK_ERRLOG("Queue io failed in vbdev_crypto_queue_io, rc=%d.\n", rc);
		crypto_io_fail(crypto_io);
	}
}
231 :
232 : static void
233 4 : crypto_read(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io)
234 : {
235 4 : struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
236 4 : struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
237 : crypto_bdev);
238 4 : struct spdk_bdev_ext_io_opts opts = {};
239 : int rc;
240 :
241 4 : opts.size = sizeof(opts);
242 4 : opts.accel_sequence = crypto_io->seq;
243 4 : opts.memory_domain = bdev_io->u.bdev.memory_domain;
244 4 : opts.memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
245 :
246 4 : rc = spdk_bdev_readv_blocks_ext(crypto_bdev->base_desc, crypto_ch->base_ch,
247 : bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
248 : bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
249 : _complete_internal_io, bdev_io, &opts);
250 4 : if (rc != 0) {
251 2 : if (rc == -ENOMEM) {
252 1 : SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
253 1 : vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_DECRYPT_DONE);
254 : } else {
255 1 : SPDK_ERRLOG("Failed to submit bdev_io!\n");
256 1 : crypto_io_fail(crypto_io);
257 : }
258 : }
259 4 : }
260 :
/* Callback for getting a buf from the bdev pool in the event that the caller passed
 * in NULL, we need to own the buffer so it doesn't get freed by another vbdev module
 * beneath us before we're done with it.
 */
static void
crypto_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		       bool success)
{
	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);
	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	uint32_t blocklen = crypto_io->crypto_bdev->crypto_bdev.blocklen;
	int rc;

	/* Buffer allocation itself can fail; fail the IO without submitting anything. */
	if (!success) {
		crypto_io_fail(crypto_io);
		return;
	}

	/* Decrypt in place: the read iovs are passed as both source and destination,
	 * so the data is transformed after the base bdev fills the buffers.
	 */
	rc = spdk_accel_append_decrypt(&crypto_io->seq, crypto_ch->accel_channel,
				       crypto_ch->crypto_key,
				       bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       bdev_io->u.bdev.memory_domain,
				       bdev_io->u.bdev.memory_domain_ctx,
				       bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       bdev_io->u.bdev.memory_domain,
				       bdev_io->u.bdev.memory_domain_ctx,
				       bdev_io->u.bdev.offset_blocks, blocklen,
				       NULL, NULL);
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			/* NOMEM status makes the bdev layer retry the whole IO later. */
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
		} else {
			SPDK_ERRLOG("Failed to submit bdev_io!\n");
			crypto_io_fail(crypto_io);
		}

		return;
	}

	crypto_read(crypto_ch, bdev_io);
}
303 :
/* Called when someone submits IO to this crypto vbdev. For IO's not relevant to crypto,
 * we're simply passing it on here via SPDK IO calls which in turn allocate another bdev IO
 * and call our cpl callback provided below along with the original bdev_io so that we can
 * complete it once this IO completes. For crypto operations, we'll either encrypt it first
 * (writes) then call back into bdev to submit it or we'll submit a read and then catch it
 * on the way back for decryption.
 */
static void
vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
					   crypto_bdev);
	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);
	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc = 0;

	/* driver_ctx is opaque scratch space from the bdev layer; clear it before use. */
	memset(crypto_io, 0, sizeof(struct crypto_bdev_io));
	crypto_io->crypto_bdev = crypto_bdev;
	crypto_io->crypto_ch = crypto_ch;
	/* Continue any accel sequence the upper layer already attached to this IO. */
	crypto_io->seq = bdev_io->u.bdev.accel_sequence;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		/* Ensure a read buffer exists; decrypt is appended in the get_buf callback. */
		spdk_bdev_io_get_buf(bdev_io, crypto_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		/* For encryption we don't want to encrypt the data in place as the host isn't
		 * expecting us to mangle its data buffers so we need to encrypt into the aux accel
		 * buffer, then we can use that as the source for the disk data transfer.
		 */
		rc = spdk_accel_get_buf(crypto_ch->accel_channel,
					bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen,
					&crypto_io->aux_buf_raw, &crypto_io->aux_domain,
					&crypto_io->aux_domain_ctx);
		if (rc == 0) {
			crypto_encrypt(crypto_ch, bdev_io);
		}
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		/* No crypto involvement: pass straight through to the base bdev. */
		rc = spdk_bdev_unmap_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(crypto_bdev->base_desc, crypto_ch->base_ch,
				     _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	/* WRITE_ZEROES is reported unsupported (see vbdev_crypto_io_type_supported),
	 * so it should never arrive here. */
	default:
		SPDK_ERRLOG("crypto: unknown I/O type %d\n", bdev_io->type);
		rc = -EINVAL;
		break;
	}

	/* Shared error tail for all submission paths above that set rc. */
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
		} else {
			SPDK_ERRLOG("Failed to submit bdev_io!\n");
			crypto_io_fail(crypto_io);
		}
	}
}
376 :
377 : /* We'll just call the base bdev and let it answer except for WZ command which
378 : * we always say we don't support so that the bdev layer will actually send us
379 : * real writes that we can encrypt.
380 : */
381 : static bool
382 1 : vbdev_crypto_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
383 : {
384 1 : struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;
385 :
386 1 : switch (io_type) {
387 0 : case SPDK_BDEV_IO_TYPE_WRITE:
388 : case SPDK_BDEV_IO_TYPE_UNMAP:
389 : case SPDK_BDEV_IO_TYPE_RESET:
390 : case SPDK_BDEV_IO_TYPE_READ:
391 : case SPDK_BDEV_IO_TYPE_FLUSH:
392 0 : return spdk_bdev_io_type_supported(crypto_bdev->base_bdev, io_type);
393 1 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
394 : /* Force the bdev layer to issue actual writes of zeroes so we can
395 : * encrypt them as regular writes.
396 : */
397 : default:
398 1 : return false;
399 : }
400 : }
401 :
/* Callback for unregistering the IO device. Final step of the asynchronous
 * destruction started in vbdev_crypto_destruct(): signals the bdev layer that
 * destruct is complete, then frees the node.
 */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_crypto *crypto_bdev = io_device;

	/* Done with this crypto_bdev. opts are owned by the bdev_names entry,
	 * so just drop the reference here. */
	crypto_bdev->opts = NULL;

	spdk_bdev_destruct_done(&crypto_bdev->crypto_bdev, 0);
	free(crypto_bdev->crypto_bdev.name);
	free(crypto_bdev);
}
415 :
/* Thread-message wrapper around spdk_bdev_close(); lets vbdev_crypto_destruct()
 * close the base descriptor on the thread that originally opened it.
 */
static void
_vbdev_crypto_destruct(void *ctx)
{
	spdk_bdev_close((struct spdk_bdev_desc *)ctx);
}
424 :
/* Called after we've unregistered following a hot remove callback.
 * Our finish entry point will be called next.
 */
static int
vbdev_crypto_destruct(void *ctx)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	/* Remove this device from the internal list */
	TAILQ_REMOVE(&g_vbdev_crypto, crypto_bdev, link);

	/* Unclaim the underlying bdev. */
	spdk_bdev_module_release_bdev(crypto_bdev->base_bdev);

	/* Close the underlying bdev on its same opened thread. */
	if (crypto_bdev->thread && crypto_bdev->thread != spdk_get_thread()) {
		spdk_thread_send_msg(crypto_bdev->thread, _vbdev_crypto_destruct, crypto_bdev->base_desc);
	} else {
		spdk_bdev_close(crypto_bdev->base_desc);
	}

	/* Unregister the io_device. */
	spdk_io_device_unregister(crypto_bdev, _device_unregister_cb);

	/* Return 1 to indicate destruction completes asynchronously;
	 * _device_unregister_cb() calls spdk_bdev_destruct_done(). */
	return 1;
}
451 :
/* Entry point for upper layers that want an IO channel to this vbdev. The
 * context is the vbdev_crypto node we registered as an io_device in
 * vbdev_crypto_claim(); the channel framework allocates the channel (SPDK
 * channel struct plus our crypto_io_channel) and invokes
 * crypto_bdev_ch_create_cb() to populate it.
 */
static struct spdk_io_channel *
vbdev_crypto_get_io_channel(void *ctx)
{
	struct vbdev_crypto *node = ctx;

	return spdk_get_io_channel(node);
}
471 :
/* This is the output for bdev_get_bdevs() for this vbdev: a "crypto" object
 * with the base bdev name, our name and the key name. Always returns 0.
 */
static int
vbdev_crypto_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	spdk_json_write_name(w, "crypto");
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev));
	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev));
	spdk_json_write_named_string(w, "key_name", crypto_bdev->opts->key->param.key_name);
	spdk_json_write_object_end(w);

	return 0;
}
487 :
/* Emit one bdev_crypto_create RPC call per live vbdev so the current
 * configuration can be replayed from saved JSON. Always returns 0.
 */
static int
vbdev_crypto_config_json(struct spdk_json_write_ctx *w)
{
	struct vbdev_crypto *crypto_bdev;

	TAILQ_FOREACH(crypto_bdev, &g_vbdev_crypto, link) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "bdev_crypto_create");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev));
		spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev));
		spdk_json_write_named_string(w, "key_name", crypto_bdev->opts->key->param.key_name);
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
	return 0;
}
505 :
506 : /* We provide this callback for the SPDK channel code to create a channel using
507 : * the channel struct we provided in our module get_io_channel() entry point. Here
508 : * we get and save off an underlying base channel of the device below us so that
509 : * we can communicate with the base bdev on a per channel basis. We also register the
510 : * poller used to complete crypto operations from the device.
511 : */
512 : static int
513 0 : crypto_bdev_ch_create_cb(void *io_device, void *ctx_buf)
514 : {
515 0 : struct crypto_io_channel *crypto_ch = ctx_buf;
516 0 : struct vbdev_crypto *crypto_bdev = io_device;
517 :
518 0 : crypto_ch->base_ch = spdk_bdev_get_io_channel(crypto_bdev->base_desc);
519 0 : if (crypto_ch->base_ch == NULL) {
520 0 : SPDK_ERRLOG("Failed to get base bdev IO channel (bdev: %s)\n",
521 : crypto_bdev->crypto_bdev.name);
522 0 : return -ENOMEM;
523 : }
524 :
525 0 : crypto_ch->accel_channel = spdk_accel_get_io_channel();
526 0 : if (crypto_ch->accel_channel == NULL) {
527 0 : SPDK_ERRLOG("Failed to get accel IO channel (bdev: %s)\n",
528 : crypto_bdev->crypto_bdev.name);
529 0 : spdk_put_io_channel(crypto_ch->base_ch);
530 0 : return -ENOMEM;
531 : }
532 :
533 0 : crypto_ch->crypto_key = crypto_bdev->opts->key;
534 :
535 0 : return 0;
536 : }
537 :
538 : /* We provide this callback for the SPDK channel code to destroy a channel
539 : * created with our create callback. We just need to undo anything we did
540 : * when we created.
541 : */
542 : static void
543 0 : crypto_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
544 : {
545 0 : struct crypto_io_channel *crypto_ch = ctx_buf;
546 :
547 0 : spdk_put_io_channel(crypto_ch->base_ch);
548 0 : spdk_put_io_channel(crypto_ch->accel_channel);
549 0 : }
550 :
551 : /* Create the association from the bdev and vbdev name and insert
552 : * on the global list. */
553 : static int
554 0 : vbdev_crypto_insert_name(struct vbdev_crypto_opts *opts, struct bdev_names **out)
555 : {
556 : struct bdev_names *name;
557 :
558 0 : assert(opts);
559 0 : assert(out);
560 :
561 0 : TAILQ_FOREACH(name, &g_bdev_names, link) {
562 0 : if (strcmp(opts->vbdev_name, name->opts->vbdev_name) == 0) {
563 0 : SPDK_ERRLOG("Crypto bdev %s already exists\n", opts->vbdev_name);
564 0 : return -EEXIST;
565 : }
566 : }
567 :
568 0 : name = calloc(1, sizeof(struct bdev_names));
569 0 : if (!name) {
570 0 : SPDK_ERRLOG("Failed to allocate memory for bdev_names.\n");
571 0 : return -ENOMEM;
572 : }
573 :
574 0 : name->opts = opts;
575 0 : TAILQ_INSERT_TAIL(&g_bdev_names, name, link);
576 0 : *out = name;
577 :
578 0 : return 0;
579 : }
580 :
/* Free a vbdev_crypto_opts and the name strings it owns. Does not touch
 * opts->key; key lifetime is managed by the caller (see
 * vbdev_crypto_delete_name()).
 */
void
free_crypto_opts(struct vbdev_crypto_opts *opts)
{
	free(opts->bdev_name);
	free(opts->vbdev_name);
	free(opts);
}
588 :
589 : static void
590 0 : vbdev_crypto_delete_name(struct bdev_names *name)
591 : {
592 0 : TAILQ_REMOVE(&g_bdev_names, name, link);
593 0 : if (name->opts) {
594 0 : if (name->opts->key_owner && name->opts->key) {
595 0 : spdk_accel_crypto_key_destroy(name->opts->key);
596 : }
597 0 : free_crypto_opts(name->opts);
598 0 : name->opts = NULL;
599 : }
600 0 : free(name);
601 0 : }
602 :
/* RPC entry point for crypto creation. Registers @opts on the name list and
 * tries to claim the base bdev immediately. Returns 0 on success (including
 * the deferred case where the base bdev does not exist yet); on failure the
 * caller retains ownership of @opts.
 */
int
create_crypto_disk(struct vbdev_crypto_opts *opts)
{
	struct bdev_names *name = NULL;
	int rc;

	rc = vbdev_crypto_insert_name(opts, &name);
	if (rc) {
		return rc;
	}

	rc = vbdev_crypto_claim(opts->bdev_name);
	if (rc == -ENODEV) {
		/* Base bdev not present yet; the name entry stays registered and
		 * creation completes from vbdev_crypto_examine() when it arrives. */
		SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
		rc = 0;
	}

	if (rc) {
		assert(name != NULL);
		/* In case of error we let the caller function to deallocate @opts
		 * since it is its responsibility. Setting name->opts = NULL let's
		 * vbdev_crypto_delete_name() know it does not have to do anything
		 * about @opts.
		 */
		name->opts = NULL;
		vbdev_crypto_delete_name(name);
	}
	return rc;
}
633 :
/* Called at driver init time, parses config file to prepare for examine calls,
 * also fully initializes the crypto drivers.
 */
static int
vbdev_crypto_init(void)
{
	/* Nothing to initialize up front; vbdevs are created via RPC/examine. */
	return 0;
}
642 :
643 : /* Called when the entire module is being torn down. */
644 : static void
645 0 : vbdev_crypto_finish(void)
646 : {
647 : struct bdev_names *name;
648 :
649 0 : while ((name = TAILQ_FIRST(&g_bdev_names))) {
650 0 : vbdev_crypto_delete_name(name);
651 : }
652 0 : }
653 :
/* During init we'll be asked how much memory we'd like passed to us
 * in bev_io structures as context. Here's where we specify how
 * much context we want per IO.
 */
static int
vbdev_crypto_get_ctx_size(void)
{
	/* One crypto_bdev_io is carved out of every bdev_io's driver_ctx. */
	return sizeof(struct crypto_bdev_io);
}
663 :
664 : static void
665 0 : vbdev_crypto_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
666 : {
667 : struct vbdev_crypto *crypto_bdev, *tmp;
668 :
669 0 : TAILQ_FOREACH_SAFE(crypto_bdev, &g_vbdev_crypto, link, tmp) {
670 0 : if (bdev_find == crypto_bdev->base_bdev) {
671 0 : spdk_bdev_unregister(&crypto_bdev->crypto_bdev, NULL, NULL);
672 : }
673 : }
674 0 : }
675 :
676 : static void
677 0 : vbdev_crypto_base_bdev_resize_cb(struct spdk_bdev *bdev_find)
678 : {
679 : struct vbdev_crypto *crypto_bdev;
680 :
681 0 : TAILQ_FOREACH(crypto_bdev, &g_vbdev_crypto, link) {
682 0 : if (bdev_find == crypto_bdev->base_bdev) {
683 0 : spdk_bdev_notify_blockcnt_change(&crypto_bdev->crypto_bdev, bdev_find->blockcnt);
684 : }
685 : }
686 0 : }
687 :
688 : /* Called when the underlying base bdev triggers asynchronous event such as bdev removal. */
689 : static void
690 0 : vbdev_crypto_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
691 : void *event_ctx)
692 : {
693 0 : switch (type) {
694 0 : case SPDK_BDEV_EVENT_REMOVE:
695 0 : vbdev_crypto_base_bdev_hotremove_cb(bdev);
696 0 : break;
697 0 : case SPDK_BDEV_EVENT_RESIZE:
698 0 : vbdev_crypto_base_bdev_resize_cb(bdev);
699 0 : break;
700 0 : default:
701 0 : SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
702 0 : break;
703 : }
704 0 : }
705 :
/* Report the memory domains this vbdev understands: the generic accel domain
 * plus whatever domains the module backing the ENCRYPT opcode exposes.
 * Returns the total count, which may exceed @array_size — only the first
 * @array_size entries are written into @domains.
 */
static int
vbdev_crypto_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
{
	struct spdk_memory_domain **accel_domains = NULL;
	int num_domains = 0, accel_rc, accel_array_size = 0;

	/* Report generic accel and encryption module's memory domains */
	if (domains && num_domains < array_size) {
		domains[num_domains] = spdk_accel_get_memory_domain();
	}

	/* Count the generic domain even if there was no room to store it. */
	num_domains++;
	if (domains && num_domains < array_size) {
		/* Hand the remainder of the caller's array to the accel query. */
		accel_domains = domains + num_domains;
		accel_array_size = array_size - num_domains;
	}
	accel_rc = spdk_accel_get_opc_memory_domains(SPDK_ACCEL_OPC_ENCRYPT, accel_domains,
			accel_array_size);
	if (accel_rc > 0) {
		num_domains += accel_rc;
	}

	return num_domains;
}
730 :
731 : static bool
732 0 : vbdev_crypto_sequence_supported(void *ctx, enum spdk_bdev_io_type type)
733 : {
734 0 : switch (type) {
735 0 : case SPDK_BDEV_IO_TYPE_READ:
736 : case SPDK_BDEV_IO_TYPE_WRITE:
737 0 : return true;
738 0 : default:
739 0 : return false;
740 : }
741 : }
742 :
/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_crypto_fn_table = {
	.destruct = vbdev_crypto_destruct,
	.submit_request = vbdev_crypto_submit_request,
	.io_type_supported = vbdev_crypto_io_type_supported,
	.get_io_channel = vbdev_crypto_get_io_channel,
	.dump_info_json = vbdev_crypto_dump_info_json,
	.get_memory_domains = vbdev_crypto_get_memory_domains,
	.accel_sequence_supported = vbdev_crypto_sequence_supported,
};

/* Module descriptor registered with the bdev layer below. */
static struct spdk_bdev_module crypto_if = {
	.name = "crypto",
	.module_init = vbdev_crypto_init,
	.get_ctx_size = vbdev_crypto_get_ctx_size,
	.examine_config = vbdev_crypto_examine,
	.module_fini = vbdev_crypto_finish,
	.config_json = vbdev_crypto_config_json
};

SPDK_BDEV_MODULE_REGISTER(crypto, &crypto_if)
764 :
/* Create and register a crypto vbdev on top of @bdev_name if a matching
 * entry exists in g_bdev_names. Returns 0 on success or when no entry
 * matches, -ENODEV if the base bdev does not exist yet (creation is then
 * retried from examine), or another negative errno on failure.
 */
static int
vbdev_crypto_claim(const char *bdev_name)
{
	struct bdev_names *name;
	struct vbdev_crypto *vbdev;
	struct spdk_bdev *bdev;
	struct spdk_iobuf_opts iobuf_opts;
	struct spdk_accel_operation_exec_ctx opctx = {};
	struct spdk_uuid ns_uuid;
	int rc = 0;

	spdk_uuid_parse(&ns_uuid, BDEV_CRYPTO_NAMESPACE_UUID);

	/* Limit the max IO size by some reasonable value. Since in write operation we use aux buffer,
	 * let's set the limit to the large_bufsize value */
	spdk_iobuf_get_opts(&iobuf_opts, sizeof(iobuf_opts));

	/* Check our list of names from config versus this bdev and if
	 * there's a match, create the crypto_bdev & bdev accordingly.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->opts->bdev_name, bdev_name) != 0) {
			continue;
		}
		SPDK_DEBUGLOG(vbdev_crypto, "Match on %s\n", bdev_name);

		vbdev = calloc(1, sizeof(struct vbdev_crypto));
		if (!vbdev) {
			SPDK_ERRLOG("Failed to allocate memory for crypto_bdev.\n");
			return -ENOMEM;
		}
		vbdev->crypto_bdev.product_name = "crypto";

		vbdev->crypto_bdev.name = strdup(name->opts->vbdev_name);
		if (!vbdev->crypto_bdev.name) {
			SPDK_ERRLOG("Failed to allocate memory for crypto_bdev name.\n");
			rc = -ENOMEM;
			goto error_bdev_name;
		}

		rc = spdk_bdev_open_ext(bdev_name, true, vbdev_crypto_base_bdev_event_cb,
					NULL, &vbdev->base_desc);
		if (rc) {
			/* -ENODEV is expected when the base bdev hasn't arrived yet; stay quiet. */
			if (rc != -ENODEV) {
				SPDK_ERRLOG("Failed to open bdev %s: error %d\n", bdev_name, rc);
			}
			goto error_open;
		}

		bdev = spdk_bdev_desc_get_bdev(vbdev->base_desc);
		vbdev->base_bdev = bdev;

		/* Inherit the base bdev's geometry/properties for the virtual bdev. */
		vbdev->crypto_bdev.write_cache = bdev->write_cache;
		vbdev->crypto_bdev.optimal_io_boundary = bdev->optimal_io_boundary;
		vbdev->crypto_bdev.max_rw_size = spdk_min(
				bdev->max_rw_size ? bdev->max_rw_size : UINT32_MAX,
				iobuf_opts.large_bufsize / bdev->blocklen);

		/* Alignment must satisfy both the base bdev and the accel crypto modules. */
		opctx.size = SPDK_SIZEOF(&opctx, block_size);
		opctx.block_size = bdev->blocklen;
		vbdev->crypto_bdev.required_alignment =
			spdk_max(bdev->required_alignment,
				 spdk_max(spdk_accel_get_buf_align(SPDK_ACCEL_OPC_ENCRYPT, &opctx),
					  spdk_accel_get_buf_align(SPDK_ACCEL_OPC_DECRYPT, &opctx)));

		vbdev->crypto_bdev.blocklen = bdev->blocklen;
		vbdev->crypto_bdev.blockcnt = bdev->blockcnt;

		/* This is the context that is passed to us when the bdev
		 * layer calls in so we'll save our crypto_bdev node here.
		 */
		vbdev->crypto_bdev.ctxt = vbdev;
		vbdev->crypto_bdev.fn_table = &vbdev_crypto_fn_table;
		vbdev->crypto_bdev.module = &crypto_if;

		/* Assign crypto opts from the name. The pointer is valid up to the point
		 * the module is unloaded and all names removed from the list. */
		vbdev->opts = name->opts;

		/* Generate UUID based on namespace UUID + base bdev UUID */
		rc = spdk_uuid_generate_sha1(&vbdev->crypto_bdev.uuid, &ns_uuid,
					     (const char *)&vbdev->base_bdev->uuid, sizeof(struct spdk_uuid));
		if (rc) {
			SPDK_ERRLOG("Unable to generate new UUID for crypto bdev\n");
			goto error_uuid;
		}

		TAILQ_INSERT_TAIL(&g_vbdev_crypto, vbdev, link);

		spdk_io_device_register(vbdev, crypto_bdev_ch_create_cb, crypto_bdev_ch_destroy_cb,
					sizeof(struct crypto_io_channel), vbdev->crypto_bdev.name);

		/* Save the thread where the base device is opened */
		vbdev->thread = spdk_get_thread();

		rc = spdk_bdev_module_claim_bdev(bdev, vbdev->base_desc, vbdev->crypto_bdev.module);
		if (rc) {
			SPDK_ERRLOG("Failed to claim bdev %s\n", spdk_bdev_get_name(bdev));
			goto error_claim;
		}

		rc = spdk_bdev_register(&vbdev->crypto_bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to register vbdev: error %d\n", rc);
			rc = -EINVAL;
			goto error_bdev_register;
		}
		SPDK_DEBUGLOG(vbdev_crypto, "Registered io_device and virtual bdev for: %s\n",
			      vbdev->opts->vbdev_name);
		break;
	}

	return rc;

	/* Error cleanup paths: unwind in reverse order of the setup above. */
error_bdev_register:
	spdk_bdev_module_release_bdev(vbdev->base_bdev);
error_claim:
	TAILQ_REMOVE(&g_vbdev_crypto, vbdev, link);
	spdk_io_device_unregister(vbdev, NULL);
error_uuid:
	spdk_bdev_close(vbdev->base_desc);
error_open:
	free(vbdev->crypto_bdev.name);
error_bdev_name:
	free(vbdev);

	return rc;
}
894 :
/* Context carried through the asynchronous crypto vbdev delete path
 * (delete_crypto_disk -> spdk_bdev_unregister_by_name completion).
 */
struct crypto_delete_disk_ctx {
	spdk_delete_crypto_complete cb_fn;	/* user completion callback, invoked exactly once */
	void *cb_arg;				/* opaque argument passed back to cb_fn */
	char *bdev_name;			/* heap-allocated copy of the vbdev name being deleted */
};
900 :
901 : static void
902 0 : delete_crypto_disk_bdev_name(void *ctx, int rc)
903 : {
904 : struct bdev_names *name;
905 0 : struct crypto_delete_disk_ctx *disk_ctx = ctx;
906 :
907 : /* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the
908 : * vbdev does not get re-created if the same bdev is constructed at some other time,
909 : * unless the underlying bdev was hot-removed. */
910 0 : TAILQ_FOREACH(name, &g_bdev_names, link) {
911 0 : if (strcmp(name->opts->vbdev_name, disk_ctx->bdev_name) == 0) {
912 0 : vbdev_crypto_delete_name(name);
913 0 : break;
914 : }
915 : }
916 :
917 0 : disk_ctx->cb_fn(disk_ctx->cb_arg, rc);
918 :
919 0 : free(disk_ctx->bdev_name);
920 0 : free(disk_ctx);
921 0 : }
922 :
923 : /* RPC entry for deleting a crypto vbdev. */
924 : void
925 0 : delete_crypto_disk(const char *bdev_name, spdk_delete_crypto_complete cb_fn,
926 : void *cb_arg)
927 : {
928 : int rc;
929 : struct crypto_delete_disk_ctx *ctx;
930 :
931 0 : ctx = calloc(1, sizeof(struct crypto_delete_disk_ctx));
932 0 : if (!ctx) {
933 0 : SPDK_ERRLOG("Failed to allocate delete crypto disk ctx\n");
934 0 : cb_fn(cb_arg, -ENOMEM);
935 0 : return;
936 : }
937 :
938 0 : ctx->bdev_name = strdup(bdev_name);
939 0 : if (!ctx->bdev_name) {
940 0 : SPDK_ERRLOG("Failed to copy bdev_name\n");
941 0 : free(ctx);
942 0 : cb_fn(cb_arg, -ENOMEM);
943 0 : return;
944 : }
945 0 : ctx->cb_arg = cb_arg;
946 0 : ctx->cb_fn = cb_fn;
947 : /* Some cleanup happens in the destruct callback. */
948 0 : rc = spdk_bdev_unregister_by_name(bdev_name, &crypto_if, delete_crypto_disk_bdev_name, ctx);
949 0 : if (rc != 0) {
950 0 : SPDK_ERRLOG("Encountered an error during bdev unregistration\n");
951 0 : cb_fn(cb_arg, rc);
952 0 : free(ctx->bdev_name);
953 0 : free(ctx);
954 : }
955 : }
956 :
957 : /* Because we specified this function in our crypto bdev function table when we
958 : * registered our crypto bdev, we'll get this call anytime a new bdev shows up.
959 : * Here we need to decide if we care about it and if so what to do. We
960 : * parsed the config file at init so we check the new bdev against the list
961 : * we built up at that time and if the user configured us to attach to this
962 : * bdev, here's where we do it.
963 : */
964 : static void
965 0 : vbdev_crypto_examine(struct spdk_bdev *bdev)
966 : {
967 0 : vbdev_crypto_claim(spdk_bdev_get_name(bdev));
968 0 : spdk_bdev_module_examine_done(&crypto_if);
969 0 : }
970 :
/* Register the "vbdev_crypto" log component used by the SPDK_DEBUGLOG() calls above. */
SPDK_LOG_REGISTER_COMPONENT(vbdev_crypto)
|