/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"
#include "accel_internal.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/xor.h"
#include "spdk/dif.h"

#ifdef SPDK_CONFIG_ISAL
#include "../isa-l/include/igzip_lib.h"
#ifdef SPDK_CONFIG_ISAL_CRYPTO
#include "../isa-l-crypto/include/aes_xts.h"
#endif
#endif

/* Per the AES-XTS spec, a data unit cannot exceed 2^20 blocks of 128 bits (16 bytes)
 * each, i.e. 2^20 * 16 = 2^24 bytes. */
#define ACCEL_AES_XTS_MAX_BLOCK_SIZE (1 << 24)

struct sw_accel_io_channel {
	/* for ISAL */
#ifdef SPDK_CONFIG_ISAL
	struct isal_zstream stream;
	struct inflate_state state;
#endif
	struct spdk_poller *completion_poller;
	STAILQ_HEAD(, spdk_accel_task) tasks_to_complete;
};

typedef void (*sw_accel_crypto_op)(uint8_t *k2, uint8_t *k1, uint8_t *tweak, uint64_t lba_size,
				   const uint8_t *src, uint8_t *dst);

struct sw_accel_crypto_key_data {
	sw_accel_crypto_op encrypt;
	sw_accel_crypto_op decrypt;
};

static struct spdk_accel_module_if g_sw_module;

static void sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *_key);
static int sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key);
static bool sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode);
static bool sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size);

/* Post SW completions to a list and complete them in a poller, as we don't want
 * to complete them on the caller's stack; the completion callback will likely
 * submit another task. */
inline static void
_add_to_comp_list(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task, int status)
{
	accel_task->status = status;
	STAILQ_INSERT_TAIL(&sw_ch->tasks_to_complete, accel_task, link);
}
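
/* Illustrative sketch (hypothetical example_* names, not part of the module):
 * the same defer-to-poller pattern in miniature. Producers append finished
 * items to a list; a poller drains the list later, so no completion callback
 * ever runs on the submitter's stack and resubmission cannot recurse. */
struct example_item {
	STAILQ_ENTRY(example_item) link;
	int status;
};
STAILQ_HEAD(example_list, example_item);

static void
example_defer(struct example_list *list, struct example_item *item, int status)
{
	item->status = status;
	STAILQ_INSERT_TAIL(list, item, link);
}

static void
example_drain(struct example_list *list)
{
	struct example_item *item;

	while ((item = STAILQ_FIRST(list))) {
		STAILQ_REMOVE_HEAD(list, link);
		/* Invoke the item's completion callback here, outside the
		 * submitter's call stack. */
	}
}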

static bool
sw_accel_supports_opcode(enum spdk_accel_opcode opc)
{
	switch (opc) {
	case SPDK_ACCEL_OPC_COPY:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_DUALCAST:
	case SPDK_ACCEL_OPC_COMPARE:
	case SPDK_ACCEL_OPC_CRC32C:
	case SPDK_ACCEL_OPC_COPY_CRC32C:
	case SPDK_ACCEL_OPC_COMPRESS:
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_XOR:
	case SPDK_ACCEL_OPC_DIF_VERIFY:
	case SPDK_ACCEL_OPC_DIF_GENERATE:
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
		return true;
	default:
		return false;
	}
}

static int
_sw_accel_dualcast_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
			struct iovec *dst2_iovs, uint32_t dst2_iovcnt,
			struct iovec *src_iovs, uint32_t src_iovcnt)
{
	if (spdk_unlikely(dst_iovcnt != 1 || dst2_iovcnt != 1 || src_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(dst_iovs[0].iov_len != src_iovs[0].iov_len ||
			  dst_iovs[0].iov_len != dst2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	memcpy(dst_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);
	memcpy(dst2_iovs[0].iov_base, src_iovs[0].iov_base, dst_iovs[0].iov_len);

	return 0;
}

static void
_sw_accel_copy_iovs(struct iovec *dst_iovs, uint32_t dst_iovcnt,
		    struct iovec *src_iovs, uint32_t src_iovcnt)
{
	struct spdk_ioviter iter;
	void *src, *dst;
	size_t len;

	for (len = spdk_ioviter_first(&iter, src_iovs, src_iovcnt,
				      dst_iovs, dst_iovcnt, &src, &dst);
	     len != 0;
	     len = spdk_ioviter_next(&iter, &src, &dst)) {
		memcpy(dst, src, len);
	}
}
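
/* Illustrative sketch: how spdk_ioviter walks two iovec arrays with different
 * layouts. Each iteration yields the longest span that is contiguous in *both*
 * arrays, so mismatched segment boundaries need no manual cursor math. The
 * buffers and sizes below are arbitrary example values. */
static void
example_ioviter_copy(void)
{
	char a[8] = "abcdefg", b[8] = { 0 };
	/* 3+5 bytes on the source side vs. a single 8-byte destination */
	struct iovec src[2] = { { a, 3 }, { a + 3, 5 } };
	struct iovec dst[1] = { { b, 8 } };
	struct spdk_ioviter iter;
	void *s, *d;
	size_t len;

	/* Yields a 3-byte span, then a 5-byte span: the minimum of the
	 * remaining source and destination segments each time. */
	for (len = spdk_ioviter_first(&iter, src, 2, dst, 1, &s, &d);
	     len != 0;
	     len = spdk_ioviter_next(&iter, &s, &d)) {
		memcpy(d, s, len);
	}
}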

static int
_sw_accel_compare(struct iovec *src_iovs, uint32_t src_iovcnt,
		  struct iovec *src2_iovs, uint32_t src2_iovcnt)
{
	if (spdk_unlikely(src_iovcnt != 1 || src2_iovcnt != 1)) {
		return -EINVAL;
	}

	if (spdk_unlikely(src_iovs[0].iov_len != src2_iovs[0].iov_len)) {
		return -EINVAL;
	}

	return memcmp(src_iovs[0].iov_base, src2_iovs[0].iov_base, src_iovs[0].iov_len);
}

static int
_sw_accel_fill(struct iovec *iovs, uint32_t iovcnt, uint8_t fill)
{
	void *dst;
	size_t nbytes;

	if (spdk_unlikely(iovcnt != 1)) {
		return -EINVAL;
	}

	dst = iovs[0].iov_base;
	nbytes = iovs[0].iov_len;

	memset(dst, fill, nbytes);

	return 0;
}

static void
_sw_accel_crc32cv(uint32_t *crc_dst, struct iovec *iov, uint32_t iovcnt, uint32_t seed)
{
	*crc_dst = spdk_crc32c_iov_update(iov, iovcnt, ~seed);
}
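
/* Illustrative sketch: the ~seed in the wrapper above maps the accel API's
 * seed convention onto spdk_crc32c_iov_update()'s running-CRC argument; with
 * the customary seed of 0, the update starts from 0xFFFFFFFF, the standard
 * CRC-32C initial state. The example values below are arbitrary. */
static uint32_t
example_crc32c_of_buffer(void *buf, size_t len)
{
	struct iovec iov = { buf, len };
	uint32_t crc;

	_sw_accel_crc32cv(&crc, &iov, 1, 0);

	return crc;
}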

static int
_sw_accel_compress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	size_t last_seglen = accel_task->s.iovs[accel_task->s.iovcnt - 1].iov_len;
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	size_t remaining;
	uint32_t i, s = 0, d = 0;
	int rc = 0;

	remaining = 0;
	for (i = 0; i < accel_task->s.iovcnt; ++i) {
		remaining += accel_task->s.iovs[i].iov_len;
	}

	isal_deflate_reset(&sw_ch->stream);
	sw_ch->stream.end_of_stream = 0;
	sw_ch->stream.next_out = diov[d].iov_base;
	sw_ch->stream.avail_out = diov[d].iov_len;
	sw_ch->stream.next_in = siov[s].iov_base;
	sw_ch->stream.avail_in = siov[s].iov_len;

	do {
		/* If isal has exhausted the current dst iovec, move to the next
		 * one, if there is one. */
		if (sw_ch->stream.avail_out == 0) {
			if (++d < accel_task->d.iovcnt) {
				sw_ch->stream.next_out = diov[d].iov_base;
				sw_ch->stream.avail_out = diov[d].iov_len;
				assert(sw_ch->stream.avail_out > 0);
			} else {
				/* We have no avail_out and no more iovecs, so either
				 * the output buffer was a perfect fit or not enough
				 * buffer was provided. Check the ISAL state to
				 * determine which. */
				if (sw_ch->stream.internal_state.state != ZSTATE_END) {
					SPDK_ERRLOG("Not enough destination buffer provided.\n");
					rc = -ENOMEM;
				}
				break;
			}
		}

		/* If isal has exhausted the current src iovec, move to the next
		 * one, if there is one. */
		if (sw_ch->stream.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->stream.next_in = siov[s].iov_base;
			sw_ch->stream.avail_in = siov[s].iov_len;
			assert(sw_ch->stream.avail_in > 0);
		}

		if (remaining <= last_seglen) {
			/* Need to set end of stream on the last block. */
			sw_ch->stream.end_of_stream = 1;
		}

		rc = isal_deflate(&sw_ch->stream);
		if (rc) {
			SPDK_ERRLOG("isal_deflate returned error %d.\n", rc);
		}

		if (remaining > 0) {
			assert(siov[s].iov_len > sw_ch->stream.avail_in);
			remaining -= (siov[s].iov_len - sw_ch->stream.avail_in);
		}

	} while (remaining > 0 || sw_ch->stream.avail_out == 0);
	assert(sw_ch->stream.avail_in == 0);

	/* Get our total output size. */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->stream.total_out > 0);
		*accel_task->output_size = sw_ch->stream.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software compression.\n");
	return -EINVAL;
#endif
}
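
/* Illustrative sketch against the same igzip API used above: a one-shot
 * deflate of a single contiguous buffer. Level 0 is used so no level_buf
 * allocation is needed (the channel above uses level 1 with a buffer of
 * ISAL_DEF_LVL1_DEFAULT bytes). Returns the compressed size, or -ENOMEM when
 * dst is too small, detected via the same ZSTATE_END check as above. */
#ifdef SPDK_CONFIG_ISAL
static int
example_deflate_one_shot(void *src, uint32_t src_len, void *dst, uint32_t dst_len)
{
	struct isal_zstream stream;

	isal_deflate_init(&stream);
	stream.flush = NO_FLUSH;
	stream.level = 0;
	stream.next_in = src;
	stream.avail_in = src_len;
	stream.next_out = dst;
	stream.avail_out = dst_len;
	stream.end_of_stream = 1;	/* all input is presented in one call */

	if (isal_deflate(&stream) != COMP_OK ||
	    stream.internal_state.state != ZSTATE_END) {
		return -ENOMEM;	/* dst too small (or stream error) */
	}

	return (int)stream.total_out;
}
#endif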

static int
_sw_accel_decompress(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
#ifdef SPDK_CONFIG_ISAL
	struct iovec *siov = accel_task->s.iovs;
	struct iovec *diov = accel_task->d.iovs;
	uint32_t s = 0, d = 0;
	int rc = 0;

	isal_inflate_reset(&sw_ch->state);
	sw_ch->state.next_out = diov[d].iov_base;
	sw_ch->state.avail_out = diov[d].iov_len;
	sw_ch->state.next_in = siov[s].iov_base;
	sw_ch->state.avail_in = siov[s].iov_len;

	do {
		/* If isal has exhausted the current dst iovec, move to the next
		 * one, if there is one. */
		if (sw_ch->state.avail_out == 0 && ((d + 1) < accel_task->d.iovcnt)) {
			d++;
			sw_ch->state.next_out = diov[d].iov_base;
			sw_ch->state.avail_out = diov[d].iov_len;
			assert(sw_ch->state.avail_out > 0);
		}

		/* If isal has exhausted the current src iovec, move to the next
		 * one, if there is one. */
		if (sw_ch->state.avail_in == 0 && ((s + 1) < accel_task->s.iovcnt)) {
			s++;
			sw_ch->state.next_in = siov[s].iov_base;
			sw_ch->state.avail_in = siov[s].iov_len;
			assert(sw_ch->state.avail_in > 0);
		}

		rc = isal_inflate(&sw_ch->state);
		if (rc) {
			SPDK_ERRLOG("isal_inflate returned error %d.\n", rc);
		}

	} while (sw_ch->state.block_state < ISAL_BLOCK_FINISH);
	assert(sw_ch->state.avail_in == 0);

	/* Get our total output size. */
	if (accel_task->output_size != NULL) {
		assert(sw_ch->state.total_out > 0);
		*accel_task->output_size = sw_ch->state.total_out;
	}

	return rc;
#else
	SPDK_ERRLOG("ISAL option is required to use software decompression.\n");
	return -EINVAL;
#endif
}
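
/* Illustrative companion sketch: a one-shot inflate of a buffer such as the
 * one produced by the deflate sketch above. isal_inflate() is run until the
 * state machine reaches ISAL_BLOCK_FINISH, mirroring the loop condition in
 * _sw_accel_decompress(). */
#ifdef SPDK_CONFIG_ISAL
static int
example_inflate_one_shot(void *src, uint32_t src_len, void *dst, uint32_t dst_len)
{
	struct inflate_state state;

	isal_inflate_init(&state);
	state.next_in = src;
	state.avail_in = src_len;
	state.next_out = dst;
	state.avail_out = dst_len;

	do {
		if (isal_inflate(&state) != ISAL_DECOMP_OK) {
			return -EIO;
		}
	} while (state.block_state < ISAL_BLOCK_FINISH && state.avail_out > 0);

	if (state.block_state != ISAL_BLOCK_FINISH) {
		return -ENOBUFS;	/* dst too small for the full stream */
	}

	return (int)state.total_out;	/* decompressed size */
}
#endif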

static int
_sw_accel_crypto_operation(struct spdk_accel_task *accel_task, struct spdk_accel_crypto_key *key,
			   sw_accel_crypto_op op)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	uint64_t iv[2];
	size_t remaining_len, dst_len;
	uint64_t src_offset = 0, dst_offset = 0;
	uint32_t src_iovpos = 0, dst_iovpos = 0, src_iovcnt, dst_iovcnt;
	uint32_t i, block_size, crypto_len, crypto_accum_len = 0;
	struct iovec *src_iov, *dst_iov;
	uint8_t *src, *dst;

	/* The IV is 128 bits; since we use the 64-bit logical block address as the IV,
	 * fill the first 8 bytes with zeroes. */
	iv[0] = 0;
	iv[1] = accel_task->iv;
	src_iov = accel_task->s.iovs;
	src_iovcnt = accel_task->s.iovcnt;
	if (accel_task->d.iovcnt) {
		dst_iov = accel_task->d.iovs;
		dst_iovcnt = accel_task->d.iovcnt;
	} else {
		/* In-place operation */
		dst_iov = accel_task->s.iovs;
		dst_iovcnt = accel_task->s.iovcnt;
	}
	block_size = accel_task->block_size;

	if (!src_iovcnt || !dst_iovcnt || !block_size || !op) {
		SPDK_ERRLOG("src_iovcnt %u, dst_iovcnt %u, block_size %u, op %p\n", src_iovcnt, dst_iovcnt,
			    block_size, op);
		return -EINVAL;
	}

	remaining_len = 0;
	for (i = 0; i < src_iovcnt; i++) {
		remaining_len += src_iov[i].iov_len;
	}
	dst_len = 0;
	for (i = 0; i < dst_iovcnt; i++) {
		dst_len += dst_iov[i].iov_len;
	}

	if (spdk_unlikely(remaining_len != dst_len || !remaining_len)) {
		return -ERANGE;
	}
	if (spdk_unlikely(remaining_len % accel_task->block_size != 0)) {
		return -EINVAL;
	}

	while (remaining_len) {
		crypto_len = spdk_min(block_size - crypto_accum_len, src_iov->iov_len - src_offset);
		crypto_len = spdk_min(crypto_len, dst_iov->iov_len - dst_offset);
		src = (uint8_t *)src_iov->iov_base + src_offset;
		dst = (uint8_t *)dst_iov->iov_base + dst_offset;

		op((uint8_t *)key->key2, (uint8_t *)key->key, (uint8_t *)iv, crypto_len, src, dst);

		src_offset += crypto_len;
		dst_offset += crypto_len;
		crypto_accum_len += crypto_len;
		remaining_len -= crypto_len;

		if (crypto_accum_len == block_size) {
			/* We may process only part of a logical block at a time; once the
			 * whole block has been processed, increment the IV. */
			crypto_accum_len = 0;
			iv[1]++;
		}
		if (src_offset == src_iov->iov_len) {
			src_iov++;
			src_iovpos++;
			src_offset = 0;
		}
		if (src_iovpos == src_iovcnt) {
			break;
		}
		if (dst_offset == dst_iov->iov_len) {
			dst_iov++;
			dst_iovpos++;
			dst_offset = 0;
		}
		if (dst_iovpos == dst_iovcnt) {
			break;
		}
	}

	if (remaining_len) {
		SPDK_ERRLOG("remaining len %zu\n", remaining_len);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}
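
/* Illustrative sketch: encrypting two consecutive 512-byte logical blocks the
 * way the loop above does, with the 64-bit LBA in a 128-bit tweak (first 8
 * bytes zero, the LBA in the rest) that is incremented per data unit. The key
 * and buffer arguments are placeholders; a real caller supplies proper 128-bit
 * XTS keys and block-sized buffers. */
#ifdef SPDK_CONFIG_ISAL_CRYPTO
static void
example_xts_two_blocks(uint8_t k1[16], uint8_t k2[16], uint64_t lba,
		       const uint8_t *src, uint8_t *dst)
{
	uint64_t iv[2] = { 0, lba };

	XTS_AES_128_enc(k2, k1, (uint8_t *)iv, 512, src, dst);
	iv[1]++;	/* the next logical block gets the next tweak */
	XTS_AES_128_enc(k2, k1, (uint8_t *)iv, 512, src + 512, dst + 512);
}
#endif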

static int
_sw_accel_encrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->encrypt);
}

static int
_sw_accel_decrypt(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	struct spdk_accel_crypto_key *key;
	struct sw_accel_crypto_key_data *key_data;

	key = accel_task->crypto_key;
	if (spdk_unlikely(key->module_if != &g_sw_module || !key->priv)) {
		return -EINVAL;
	}
	if (spdk_unlikely(accel_task->block_size > ACCEL_AES_XTS_MAX_BLOCK_SIZE)) {
		SPDK_WARNLOG("Max block size for AES_XTS is limited to %u, current size %u\n",
			     ACCEL_AES_XTS_MAX_BLOCK_SIZE, accel_task->block_size);
		return -ERANGE;
	}
	key_data = key->priv;
	return _sw_accel_crypto_operation(accel_task, key, key_data->decrypt);
}

static int
_sw_accel_xor(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_xor_gen(accel_task->d.iovs[0].iov_base,
			    accel_task->nsrcs.srcs,
			    accel_task->nsrcs.cnt,
			    accel_task->d.iovs[0].iov_len);
}
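
/* Illustrative sketch: generating RAID5-style parity with spdk_xor_gen(), as
 * the wrapper above does with the task's n source buffers. The stack buffers,
 * fill values, and length here are arbitrary example values. */
static int
example_xor_parity(void)
{
	uint8_t a[16], b[16], parity[16];
	void *sources[2] = { a, b };

	memset(a, 0xaa, sizeof(a));
	memset(b, 0x55, sizeof(b));

	/* parity[i] = a[i] ^ b[i]; here every byte becomes 0xff */
	return spdk_xor_gen(parity, sources, 2, sizeof(parity));
}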

static int
_sw_accel_dif_verify(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_verify(accel_task->s.iovs,
			       accel_task->s.iovcnt,
			       accel_task->dif.num_blocks,
			       accel_task->dif.ctx,
			       accel_task->dif.err);
}

static int
_sw_accel_dif_generate(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_generate(accel_task->s.iovs,
				 accel_task->s.iovcnt,
				 accel_task->dif.num_blocks,
				 accel_task->dif.ctx);
}

static int
_sw_accel_dif_generate_copy(struct sw_accel_io_channel *sw_ch, struct spdk_accel_task *accel_task)
{
	return spdk_dif_generate_copy(accel_task->s.iovs,
				      accel_task->s.iovcnt,
				      accel_task->d.iovs,
				      accel_task->d.iovcnt,
				      accel_task->dif.num_blocks,
				      accel_task->dif.ctx);
}

static int
sw_accel_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *accel_task)
{
	struct sw_accel_io_channel *sw_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *tmp;
	int rc = 0;

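	/* Tasks may arrive pre-chained via STAILQ_NEXT(); walk the chain, run
	 * each operation synchronously, and queue every task for deferred
	 * completion. */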
	do {
		switch (accel_task->op_code) {
		case SPDK_ACCEL_OPC_COPY:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case SPDK_ACCEL_OPC_FILL:
			rc = _sw_accel_fill(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->fill_pattern);
			break;
		case SPDK_ACCEL_OPC_DUALCAST:
			rc = _sw_accel_dualcast_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
						     accel_task->d2.iovs, accel_task->d2.iovcnt,
						     accel_task->s.iovs, accel_task->s.iovcnt);
			break;
		case SPDK_ACCEL_OPC_COMPARE:
			rc = _sw_accel_compare(accel_task->s.iovs, accel_task->s.iovcnt,
					       accel_task->s2.iovs, accel_task->s2.iovcnt);
			break;
		case SPDK_ACCEL_OPC_CRC32C:
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case SPDK_ACCEL_OPC_COPY_CRC32C:
			_sw_accel_copy_iovs(accel_task->d.iovs, accel_task->d.iovcnt,
					    accel_task->s.iovs, accel_task->s.iovcnt);
			_sw_accel_crc32cv(accel_task->crc_dst, accel_task->s.iovs,
					  accel_task->s.iovcnt, accel_task->seed);
			break;
		case SPDK_ACCEL_OPC_COMPRESS:
			rc = _sw_accel_compress(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DECOMPRESS:
			rc = _sw_accel_decompress(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_XOR:
			rc = _sw_accel_xor(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_ENCRYPT:
			rc = _sw_accel_encrypt(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DECRYPT:
			rc = _sw_accel_decrypt(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_VERIFY:
			rc = _sw_accel_dif_verify(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE:
			rc = _sw_accel_dif_generate(sw_ch, accel_task);
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
			rc = _sw_accel_dif_generate_copy(sw_ch, accel_task);
			break;
		default:
			assert(false);
			break;
		}

		tmp = STAILQ_NEXT(accel_task, link);

		_add_to_comp_list(sw_ch, accel_task, rc);

		accel_task = tmp;
	} while (accel_task);

	return 0;
}

static int
accel_comp_poll(void *arg)
{
	struct sw_accel_io_channel *sw_ch = arg;
	STAILQ_HEAD(, spdk_accel_task) tasks_to_complete;
	struct spdk_accel_task *accel_task;

	if (STAILQ_EMPTY(&sw_ch->tasks_to_complete)) {
		return SPDK_POLLER_IDLE;
	}

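	/* Swap the pending list onto the stack before draining it: this bounds
	 * the poller to the tasks already queued and lets completion callbacks
	 * safely append new tasks to the (now empty) channel list. */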
	STAILQ_INIT(&tasks_to_complete);
	STAILQ_SWAP(&tasks_to_complete, &sw_ch->tasks_to_complete, spdk_accel_task);

	while ((accel_task = STAILQ_FIRST(&tasks_to_complete))) {
		STAILQ_REMOVE_HEAD(&tasks_to_complete, link);
		spdk_accel_task_complete(accel_task, accel_task->status);
	}

	return SPDK_POLLER_BUSY;
}

static int
sw_accel_create_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

	STAILQ_INIT(&sw_ch->tasks_to_complete);
	sw_ch->completion_poller = SPDK_POLLER_REGISTER(accel_comp_poll, sw_ch, 0);

#ifdef SPDK_CONFIG_ISAL
	isal_deflate_init(&sw_ch->stream);
	sw_ch->stream.flush = NO_FLUSH;
	sw_ch->stream.level = 1;
	sw_ch->stream.level_buf = calloc(1, ISAL_DEF_LVL1_DEFAULT);
	if (sw_ch->stream.level_buf == NULL) {
		SPDK_ERRLOG("Could not allocate isal internal buffer\n");
		/* Release the poller registered above before failing the
		 * channel creation. */
		spdk_poller_unregister(&sw_ch->completion_poller);
		return -ENOMEM;
	}
	sw_ch->stream.level_buf_size = ISAL_DEF_LVL1_DEFAULT;
	isal_inflate_init(&sw_ch->state);
#endif

	return 0;
}

static void
sw_accel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct sw_accel_io_channel *sw_ch = ctx_buf;

#ifdef SPDK_CONFIG_ISAL
	free(sw_ch->stream.level_buf);
#endif

	spdk_poller_unregister(&sw_ch->completion_poller);
}

static struct spdk_io_channel *
sw_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_sw_module);
}

static size_t
sw_accel_module_get_ctx_size(void)
{
	return sizeof(struct spdk_accel_task);
}

static int
sw_accel_module_init(void)
{
	spdk_io_device_register(&g_sw_module, sw_accel_create_cb, sw_accel_destroy_cb,
				sizeof(struct sw_accel_io_channel), "sw_accel_module");

	return 0;
}

static void
sw_accel_module_fini(void *ctxt)
{
	spdk_io_device_unregister(&g_sw_module, NULL);
	spdk_accel_module_finish();
}

static int
sw_accel_create_aes_xts(struct spdk_accel_crypto_key *key)
{
#ifdef SPDK_CONFIG_ISAL_CRYPTO
	struct sw_accel_crypto_key_data *key_data;

	key_data = calloc(1, sizeof(*key_data));
	if (!key_data) {
		return -ENOMEM;
	}

	switch (key->key_size) {
	case SPDK_ACCEL_AES_XTS_128_KEY_SIZE:
		key_data->encrypt = XTS_AES_128_enc;
		key_data->decrypt = XTS_AES_128_dec;
		break;
	case SPDK_ACCEL_AES_XTS_256_KEY_SIZE:
		key_data->encrypt = XTS_AES_256_enc;
		key_data->decrypt = XTS_AES_256_dec;
		break;
	default:
		assert(0);
		free(key_data);
		return -EINVAL;
	}

	key->priv = key_data;

	return 0;
#else
	return -ENOTSUP;
#endif
}

static int
sw_accel_crypto_key_init(struct spdk_accel_crypto_key *key)
{
	return sw_accel_create_aes_xts(key);
}

static void
sw_accel_crypto_key_deinit(struct spdk_accel_crypto_key *key)
{
	if (!key || key->module_if != &g_sw_module || !key->priv) {
		return;
	}

	free(key->priv);
}

static bool
sw_accel_crypto_supports_tweak_mode(enum spdk_accel_crypto_tweak_mode tweak_mode)
{
	return tweak_mode == SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA;
}

static bool
sw_accel_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size)
{
	switch (cipher) {
	case SPDK_ACCEL_CIPHER_AES_XTS:
		return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE ||
		       key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE;
	default:
		return false;
	}
}

static int
sw_accel_get_operation_info(enum spdk_accel_opcode opcode,
			    const struct spdk_accel_operation_exec_ctx *ctx,
			    struct spdk_accel_opcode_info *info)
{
	info->required_alignment = 0;

	return 0;
}

static struct spdk_accel_module_if g_sw_module = {
	.module_init = sw_accel_module_init,
	.module_fini = sw_accel_module_fini,
	.write_config_json = NULL,
	.get_ctx_size = sw_accel_module_get_ctx_size,
	.name = "software",
	.priority = SPDK_ACCEL_SW_PRIORITY,
	.supports_opcode = sw_accel_supports_opcode,
	.get_io_channel = sw_accel_get_io_channel,
	.submit_tasks = sw_accel_submit_tasks,
	.crypto_key_init = sw_accel_crypto_key_init,
	.crypto_key_deinit = sw_accel_crypto_key_deinit,
	.crypto_supports_tweak_mode = sw_accel_crypto_supports_tweak_mode,
	.crypto_supports_cipher = sw_accel_crypto_supports_cipher,
	.get_operation_info = sw_accel_get_operation_info,
};

SPDK_ACCEL_MODULE_REGISTER(sw, &g_sw_module)