Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2020 Intel Corporation.
3 : * All rights reserved.
4 : */
5 :
6 : #include "spdk/stdinc.h"
7 :
8 : #include "spdk/env.h"
9 : #include "spdk/util.h"
10 : #include "spdk/memory.h"
11 : #include "spdk/likely.h"
12 :
13 : #include "spdk/log.h"
14 : #include "spdk_internal/idxd.h"
15 :
16 : #include "idxd_internal.h"
17 :
18 : #define ALIGN_4K 0x1000
19 : #define USERSPACE_DRIVER_NAME "user"
20 : #define KERNEL_DRIVER_NAME "kernel"
21 :
22 : /* The max number of completions processed per poll */
23 : #define IDXD_MAX_COMPLETIONS 128
24 :
25 : /* The minimum number of entries in batch per flush */
26 : #define IDXD_MIN_BATCH_FLUSH 32
27 :
28 : #define DATA_BLOCK_SIZE_512 512
29 : #define DATA_BLOCK_SIZE_520 520
30 : #define DATA_BLOCK_SIZE_4096 4096
31 : #define DATA_BLOCK_SIZE_4104 4104
32 :
33 : #define METADATA_SIZE_8 8
34 : #define METADATA_SIZE_16 16
35 :
36 : static STAILQ_HEAD(, spdk_idxd_impl) g_idxd_impls = STAILQ_HEAD_INITIALIZER(g_idxd_impls);
37 : static struct spdk_idxd_impl *g_idxd_impl;
38 :
39 : uint32_t
40 0 : spdk_idxd_get_socket(struct spdk_idxd_device *idxd)
41 : {
42 0 : return idxd->socket_id;
43 : }
44 :
45 : static inline void
46 0 : _submit_to_hw(struct spdk_idxd_io_channel *chan, struct idxd_ops *op)
47 : {
48 0 : STAILQ_INSERT_TAIL(&chan->ops_outstanding, op, link);
49 : /*
50 : * We must barrier before writing the descriptor to ensure that data
51 : * has been correctly flushed from the associated data buffers before DMA
52 : * operations begin.
53 : */
54 0 : _spdk_wmb();
55 0 : movdir64b(chan->portal + chan->portal_offset, op->desc);
56 0 : chan->portal_offset = (chan->portal_offset + chan->idxd->chan_per_device * PORTAL_STRIDE) &
57 : PORTAL_MASK;
58 0 : }
59 :
60 : static inline int
61 0 : _vtophys(struct spdk_idxd_io_channel *chan, const void *buf, uint64_t *buf_addr, uint64_t size)
62 : {
63 0 : uint64_t updated_size = size;
64 :
65 0 : if (chan->pasid_enabled) {
66 : /* We can just use virtual addresses */
67 0 : *buf_addr = (uint64_t)buf;
68 0 : return 0;
69 : }
70 :
71 0 : *buf_addr = spdk_vtophys(buf, &updated_size);
72 :
73 0 : if (*buf_addr == SPDK_VTOPHYS_ERROR) {
74 0 : SPDK_ERRLOG("Error translating address\n");
75 0 : return -EINVAL;
76 : }
77 :
78 0 : if (updated_size < size) {
79 0 : 	SPDK_ERRLOG("Error translating size (0x%lx), translated size (0x%lx)\n", size, updated_size);
80 0 : return -EINVAL;
81 : }
82 :
83 0 : return 0;
84 : }
85 :
86 : struct idxd_vtophys_iter {
87 : const void *src;
88 : void *dst;
89 : uint64_t len;
90 :
91 : uint64_t offset;
92 :
93 : bool pasid_enabled;
94 : };
95 :
96 : static void
97 0 : idxd_vtophys_iter_init(struct spdk_idxd_io_channel *chan,
98 : struct idxd_vtophys_iter *iter,
99 : const void *src, void *dst, uint64_t len)
100 : {
101 0 : iter->src = src;
102 0 : iter->dst = dst;
103 0 : iter->len = len;
104 0 : iter->offset = 0;
105 0 : iter->pasid_enabled = chan->pasid_enabled;
106 0 : }
107 :
108 : static uint64_t
109 0 : idxd_vtophys_iter_next(struct idxd_vtophys_iter *iter,
110 : uint64_t *src_phys, uint64_t *dst_phys)
111 : {
112 0 : uint64_t src_off, dst_off, len;
113 : const void *src;
114 : void *dst;
115 :
116 0 : src = iter->src + iter->offset;
117 0 : dst = iter->dst + iter->offset;
118 :
119 0 : if (iter->offset == iter->len) {
120 0 : return 0;
121 : }
122 :
123 0 : if (iter->pasid_enabled) {
124 0 : *src_phys = (uint64_t)src;
125 0 : *dst_phys = (uint64_t)dst;
126 0 : return iter->len;
127 : }
128 :
129 0 : len = iter->len - iter->offset;
130 :
131 0 : src_off = len;
132 0 : *src_phys = spdk_vtophys(src, &src_off);
133 0 : if (*src_phys == SPDK_VTOPHYS_ERROR) {
134 0 : SPDK_ERRLOG("Error translating address\n");
135 0 : return SPDK_VTOPHYS_ERROR;
136 : }
137 :
138 0 : dst_off = len;
139 0 : *dst_phys = spdk_vtophys(dst, &dst_off);
140 0 : if (*dst_phys == SPDK_VTOPHYS_ERROR) {
141 0 : SPDK_ERRLOG("Error translating address\n");
142 0 : return SPDK_VTOPHYS_ERROR;
143 : }
144 :
145 0 : len = spdk_min(src_off, dst_off);
146 0 : iter->offset += len;
147 :
148 0 : return len;
149 : }
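
/*
 * Typical traversal pattern for the iterator above, a minimal sketch that
 * mirrors the submission loops later in this file. Each call returns the
 * largest segment that is physically contiguous in both src and dst, 0 once
 * the range is exhausted, or SPDK_VTOPHYS_ERROR on a translation failure:
 *
 *	struct idxd_vtophys_iter iter;
 *	uint64_t src_phys, dst_phys, seg_len;
 *
 *	idxd_vtophys_iter_init(chan, &iter, src, dst, len);
 *	while ((seg_len = idxd_vtophys_iter_next(&iter, &src_phys, &dst_phys)) != 0) {
 *		if (seg_len == SPDK_VTOPHYS_ERROR) {
 *			return -EFAULT;
 *		}
 *		// program one descriptor covering [src_phys, src_phys + seg_len)
 *	}
 */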
150 :
151 : /* Helper for the DSA-specific part of spdk_idxd_get_channel(): allocate the batch pool. */
152 : static int
153 0 : _dsa_alloc_batches(struct spdk_idxd_io_channel *chan, int num_descriptors)
154 : {
155 : struct idxd_batch *batch;
156 : struct idxd_hw_desc *desc;
157 : struct idxd_ops *op;
158 0 : int i, j, num_batches, rc = -1;
159 :
160 : /* Allocate batches */
161 0 : num_batches = num_descriptors;
162 0 : chan->batch_base = calloc(num_batches, sizeof(struct idxd_batch));
163 0 : if (chan->batch_base == NULL) {
164 0 : SPDK_ERRLOG("Failed to allocate batch pool\n");
165 0 : goto error_desc;
166 : }
167 0 : batch = chan->batch_base;
168 0 : for (i = 0 ; i < num_batches ; i++) {
169 0 : batch->size = chan->idxd->batch_size;
170 0 : batch->user_desc = desc = spdk_zmalloc(batch->size * sizeof(struct idxd_hw_desc),
171 : 0x40, NULL,
172 : SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
173 0 : if (batch->user_desc == NULL) {
174 0 : SPDK_ERRLOG("Failed to allocate batch descriptor memory\n");
175 0 : goto error_user;
176 : }
177 :
178 0 : rc = _vtophys(chan, batch->user_desc, &batch->user_desc_addr,
179 0 : batch->size * sizeof(struct idxd_hw_desc));
180 0 : if (rc) {
181 0 : SPDK_ERRLOG("Failed to translate batch descriptor memory\n");
182 0 : goto error_user;
183 : }
184 :
185 0 : batch->user_ops = op = spdk_zmalloc(batch->size * sizeof(struct idxd_ops),
186 : 0x40, NULL,
187 : SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
188 0 : if (batch->user_ops == NULL) {
189 0 : SPDK_ERRLOG("Failed to allocate user completion memory\n");
190 0 : goto error_user;
191 : }
192 :
193 0 : for (j = 0; j < batch->size; j++) {
194 0 : rc = _vtophys(chan, &op->hw, &desc->completion_addr, sizeof(struct dsa_hw_comp_record));
195 0 : if (rc) {
196 0 : SPDK_ERRLOG("Failed to translate batch entry completion memory\n");
197 0 : goto error_user;
198 : }
199 0 : op++;
200 0 : desc++;
201 : }
202 0 : TAILQ_INSERT_TAIL(&chan->batch_pool, batch, link);
203 0 : batch++;
204 : }
205 0 : return 0;
206 :
207 0 : error_user:
208 0 : TAILQ_FOREACH(batch, &chan->batch_pool, link) {
209 0 : spdk_free(batch->user_ops);
210 0 : batch->user_ops = NULL;
211 0 : spdk_free(batch->user_desc);
212 0 : batch->user_desc = NULL;
213 : }
214 0 : spdk_free(chan->ops_base);
215 0 : chan->ops_base = NULL;
216 0 : error_desc:
217 0 : STAILQ_INIT(&chan->ops_pool);
218 0 : spdk_free(chan->desc_base);
219 0 : chan->desc_base = NULL;
220 0 : return rc;
221 : }
222 :
223 : struct spdk_idxd_io_channel *
224 0 : spdk_idxd_get_channel(struct spdk_idxd_device *idxd)
225 : {
226 : struct spdk_idxd_io_channel *chan;
227 : struct idxd_hw_desc *desc;
228 : struct idxd_ops *op;
229 0 : int i, num_descriptors, rc = -1;
230 : uint32_t comp_rec_size;
231 :
232 0 : assert(idxd != NULL);
233 :
234 0 : chan = calloc(1, sizeof(struct spdk_idxd_io_channel));
235 0 : if (chan == NULL) {
236 0 : SPDK_ERRLOG("Failed to allocate idxd chan\n");
237 0 : return NULL;
238 : }
239 :
240 0 : chan->idxd = idxd;
241 0 : chan->pasid_enabled = idxd->pasid_enabled;
242 0 : STAILQ_INIT(&chan->ops_pool);
243 0 : TAILQ_INIT(&chan->batch_pool);
244 0 : STAILQ_INIT(&chan->ops_outstanding);
245 :
246 : /* Assign WQ, portal */
247 0 : pthread_mutex_lock(&idxd->num_channels_lock);
248 0 : if (idxd->num_channels == idxd->chan_per_device) {
249 : /* too many channels sharing this device */
250 0 : pthread_mutex_unlock(&idxd->num_channels_lock);
251 0 : SPDK_ERRLOG("Too many channels sharing this device\n");
252 0 : goto error;
253 : }
254 :
255 : /* Have each channel start at a different offset. */
256 0 : chan->portal = idxd->impl->portal_get_addr(idxd);
257 0 : chan->portal_offset = (idxd->num_channels * PORTAL_STRIDE) & PORTAL_MASK;
258 0 : idxd->num_channels++;
259 :
260 0 : pthread_mutex_unlock(&idxd->num_channels_lock);
261 :
262 : /* Allocate descriptors and completions */
263 0 : num_descriptors = idxd->total_wq_size / idxd->chan_per_device;
264 0 : chan->desc_base = desc = spdk_zmalloc(num_descriptors * sizeof(struct idxd_hw_desc),
265 : 0x40, NULL,
266 : SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
267 0 : if (chan->desc_base == NULL) {
268 0 : SPDK_ERRLOG("Failed to allocate DSA descriptor memory\n");
269 0 : goto error;
270 : }
271 :
272 0 : chan->ops_base = op = spdk_zmalloc(num_descriptors * sizeof(struct idxd_ops),
273 : 0x40, NULL,
274 : SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
275 0 : if (chan->ops_base == NULL) {
276 0 : SPDK_ERRLOG("Failed to allocate idxd_ops memory\n");
277 0 : goto error;
278 : }
279 :
280 0 : if (idxd->type == IDXD_DEV_TYPE_DSA) {
281 0 : comp_rec_size = sizeof(struct dsa_hw_comp_record);
282 0 : if (_dsa_alloc_batches(chan, num_descriptors)) {
283 0 : goto error;
284 : }
285 : } else {
286 0 : comp_rec_size = sizeof(struct iaa_hw_comp_record);
287 : }
288 :
289 0 : for (i = 0; i < num_descriptors; i++) {
290 0 : STAILQ_INSERT_TAIL(&chan->ops_pool, op, link);
291 0 : op->desc = desc;
292 0 : rc = _vtophys(chan, &op->hw, &desc->completion_addr, comp_rec_size);
293 0 : if (rc) {
294 0 : SPDK_ERRLOG("Failed to translate completion memory\n");
295 0 : goto error;
296 : }
297 0 : op++;
298 0 : desc++;
299 : }
300 :
301 0 : return chan;
302 :
303 0 : error:
304 0 : spdk_free(chan->ops_base);
305 0 : chan->ops_base = NULL;
306 0 : spdk_free(chan->desc_base);
307 0 : chan->desc_base = NULL;
308 0 : free(chan);
309 0 : return NULL;
310 : }
311 :
312 : static int idxd_batch_cancel(struct spdk_idxd_io_channel *chan, int status);
313 :
314 : void
315 0 : spdk_idxd_put_channel(struct spdk_idxd_io_channel *chan)
316 : {
317 : struct idxd_batch *batch;
318 :
319 0 : assert(chan != NULL);
320 0 : assert(chan->idxd != NULL);
321 :
322 0 : if (chan->batch) {
323 0 : idxd_batch_cancel(chan, -ECANCELED);
324 : }
325 :
326 0 : pthread_mutex_lock(&chan->idxd->num_channels_lock);
327 0 : assert(chan->idxd->num_channels > 0);
328 0 : chan->idxd->num_channels--;
329 0 : pthread_mutex_unlock(&chan->idxd->num_channels_lock);
330 :
331 0 : spdk_free(chan->ops_base);
332 0 : spdk_free(chan->desc_base);
333 0 : while ((batch = TAILQ_FIRST(&chan->batch_pool))) {
334 0 : TAILQ_REMOVE(&chan->batch_pool, batch, link);
335 0 : spdk_free(batch->user_ops);
336 0 : spdk_free(batch->user_desc);
337 : }
338 0 : free(chan->batch_base);
339 0 : free(chan);
340 0 : }
341 :
342 : static inline struct spdk_idxd_impl *
343 0 : idxd_get_impl_by_name(const char *impl_name)
344 : {
345 : struct spdk_idxd_impl *impl;
346 :
347 0 : assert(impl_name != NULL);
348 0 : STAILQ_FOREACH(impl, &g_idxd_impls, link) {
349 0 : if (0 == strcmp(impl_name, impl->name)) {
350 0 : return impl;
351 : }
352 : }
353 :
354 0 : return NULL;
355 : }
356 :
357 : int
358 0 : spdk_idxd_set_config(bool kernel_mode)
359 : {
360 : struct spdk_idxd_impl *tmp;
361 :
362 0 : if (kernel_mode) {
363 0 : tmp = idxd_get_impl_by_name(KERNEL_DRIVER_NAME);
364 : } else {
365 0 : tmp = idxd_get_impl_by_name(USERSPACE_DRIVER_NAME);
366 : }
367 :
368 0 : if (g_idxd_impl != NULL && g_idxd_impl != tmp) {
369 0 : SPDK_ERRLOG("Cannot change idxd implementation after devices are initialized\n");
370 0 : assert(false);
371 : return -EALREADY;
372 : }
373 0 : g_idxd_impl = tmp;
374 :
375 0 : if (g_idxd_impl == NULL) {
376 0 : SPDK_ERRLOG("Cannot set the idxd implementation with %s mode\n",
377 : kernel_mode ? KERNEL_DRIVER_NAME : USERSPACE_DRIVER_NAME);
378 0 : return -EINVAL;
379 : }
380 :
381 0 : return 0;
382 : }
383 :
384 : static void
385 0 : idxd_device_destruct(struct spdk_idxd_device *idxd)
386 : {
387 0 : assert(idxd->impl != NULL);
388 :
389 0 : idxd->impl->destruct(idxd);
390 0 : }
391 :
392 : int
393 0 : spdk_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb,
394 : spdk_idxd_probe_cb probe_cb)
395 : {
396 0 : if (g_idxd_impl == NULL) {
397 0 : SPDK_ERRLOG("No idxd impl is selected\n");
398 0 : return -1;
399 : }
400 :
401 0 : return g_idxd_impl->probe(cb_ctx, attach_cb, probe_cb);
402 : }
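
/*
 * Minimal initialization sketch. The callback typedefs come from
 * spdk_internal/idxd.h; my_probe_cb and my_attach_cb are hypothetical names:
 *
 *	static bool
 *	my_probe_cb(void *cb_ctx, struct spdk_idxd_device *idxd)
 *	{
 *		return true;	// claim every device offered
 *	}
 *
 *	static void
 *	my_attach_cb(void *cb_ctx, struct spdk_idxd_device *idxd)
 *	{
 *		// save the device; create per-thread channels later
 *	}
 *
 *	spdk_idxd_set_config(false);	// false selects the userspace implementation
 *	spdk_idxd_probe(NULL, my_attach_cb, my_probe_cb);
 */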
403 :
404 : void
405 0 : spdk_idxd_detach(struct spdk_idxd_device *idxd)
406 : {
407 0 : assert(idxd != NULL);
408 0 : idxd_device_destruct(idxd);
409 0 : }
410 :
411 : static int
412 0 : _idxd_prep_command(struct spdk_idxd_io_channel *chan, spdk_idxd_req_cb cb_fn, void *cb_arg,
413 : int flags, struct idxd_hw_desc **_desc, struct idxd_ops **_op)
414 : {
415 : struct idxd_hw_desc *desc;
416 : struct idxd_ops *op;
417 : uint64_t comp_addr;
418 :
419 0 : if (!STAILQ_EMPTY(&chan->ops_pool)) {
420 0 : op = *_op = STAILQ_FIRST(&chan->ops_pool);
421 0 : desc = *_desc = op->desc;
422 0 : comp_addr = desc->completion_addr;
423 0 : memset(desc, 0, sizeof(*desc));
424 0 : desc->completion_addr = comp_addr;
425 0 : STAILQ_REMOVE_HEAD(&chan->ops_pool, link);
426 : } else {
427 : /* No free ops: the caller has violated flow control and must handle -EBUSY. */
428 0 : return -EBUSY;
429 : }
430 :
431 0 : flags |= IDXD_FLAG_COMPLETION_ADDR_VALID;
432 0 : flags |= IDXD_FLAG_REQUEST_COMPLETION;
433 :
434 0 : desc->flags = flags;
435 0 : op->cb_arg = cb_arg;
436 0 : op->cb_fn = cb_fn;
437 0 : op->batch = NULL;
438 0 : op->parent = NULL;
439 0 : op->count = 1;
440 :
441 0 : return 0;
442 : }
443 :
444 : static int
445 0 : _idxd_prep_batch_cmd(struct spdk_idxd_io_channel *chan, spdk_idxd_req_cb cb_fn,
446 : void *cb_arg, int flags,
447 : struct idxd_hw_desc **_desc, struct idxd_ops **_op)
448 : {
449 : struct idxd_hw_desc *desc;
450 : struct idxd_ops *op;
451 : uint64_t comp_addr;
452 : struct idxd_batch *batch;
453 :
454 0 : batch = chan->batch;
455 :
456 0 : assert(batch != NULL);
457 0 : if (batch->index == batch->size) {
458 0 : return -EBUSY;
459 : }
460 :
461 0 : desc = *_desc = &batch->user_desc[batch->index];
462 0 : op = *_op = &batch->user_ops[batch->index];
463 :
464 0 : op->desc = desc;
465 0 : SPDK_DEBUGLOG(idxd, "Prep batch %p index %u\n", batch, batch->index);
466 :
467 0 : batch->index++;
468 :
469 0 : comp_addr = desc->completion_addr;
470 0 : memset(desc, 0, sizeof(*desc));
471 0 : desc->completion_addr = comp_addr;
472 0 : flags |= IDXD_FLAG_COMPLETION_ADDR_VALID;
473 0 : flags |= IDXD_FLAG_REQUEST_COMPLETION;
474 0 : desc->flags = flags;
475 0 : op->cb_arg = cb_arg;
476 0 : op->cb_fn = cb_fn;
477 0 : op->batch = batch;
478 0 : op->parent = NULL;
479 0 : op->count = 1;
480 0 : op->crc_dst = NULL;
481 :
482 0 : return 0;
483 : }
484 :
485 : static struct idxd_batch *
486 0 : idxd_batch_create(struct spdk_idxd_io_channel *chan)
487 : {
488 : struct idxd_batch *batch;
489 :
490 0 : assert(chan != NULL);
491 0 : assert(chan->batch == NULL);
492 :
493 0 : if (!TAILQ_EMPTY(&chan->batch_pool)) {
494 0 : batch = TAILQ_FIRST(&chan->batch_pool);
495 0 : batch->index = 0;
496 0 : batch->chan = chan;
497 0 : chan->batch = batch;
498 0 : TAILQ_REMOVE(&chan->batch_pool, batch, link);
499 : } else {
500 : /* Batch pool exhausted; the application needs to handle this. */
501 0 : return NULL;
502 : }
503 :
504 0 : return batch;
505 : }
506 :
507 : static void
508 0 : _free_batch(struct idxd_batch *batch, struct spdk_idxd_io_channel *chan)
509 : {
510 0 : SPDK_DEBUGLOG(idxd, "Free batch %p\n", batch);
511 0 : assert(batch->refcnt == 0);
512 0 : batch->index = 0;
513 0 : batch->chan = NULL;
514 0 : TAILQ_INSERT_TAIL(&chan->batch_pool, batch, link);
515 0 : }
516 :
517 : static int
518 0 : idxd_batch_cancel(struct spdk_idxd_io_channel *chan, int status)
519 : {
520 : struct idxd_ops *op;
521 : struct idxd_batch *batch;
522 : int i;
523 :
524 0 : assert(chan != NULL);
525 :
526 0 : batch = chan->batch;
527 0 : assert(batch != NULL);
528 :
529 0 : if (batch->index == UINT16_MAX) {
530 0 : SPDK_ERRLOG("Cannot cancel batch, already submitted to HW.\n");
531 0 : return -EINVAL;
532 : }
533 :
534 0 : chan->batch = NULL;
535 :
536 0 : for (i = 0; i < batch->index; i++) {
537 0 : op = &batch->user_ops[i];
538 0 : if (op->cb_fn) {
539 0 : op->cb_fn(op->cb_arg, status);
540 : }
541 : }
542 :
543 0 : _free_batch(batch, chan);
544 :
545 0 : return 0;
546 : }
547 :
548 : static int
549 0 : idxd_batch_submit(struct spdk_idxd_io_channel *chan,
550 : spdk_idxd_req_cb cb_fn, void *cb_arg)
551 : {
552 0 : struct idxd_hw_desc *desc;
553 : struct idxd_batch *batch;
554 0 : struct idxd_ops *op;
555 0 : int i, rc, flags = 0;
556 :
557 0 : assert(chan != NULL);
558 :
559 0 : batch = chan->batch;
560 0 : assert(batch != NULL);
561 :
562 0 : if (batch->index == 0) {
563 0 : return idxd_batch_cancel(chan, 0);
564 : }
565 :
566 : /* Common prep. */
567 0 : rc = _idxd_prep_command(chan, cb_fn, cb_arg, flags, &desc, &op);
568 0 : if (rc) {
569 0 : return rc;
570 : }
571 :
572 0 : if (batch->index == 1) {
573 : uint64_t completion_addr;
574 :
575 : /* If there's only one command, convert it away from a batch. */
576 0 : completion_addr = desc->completion_addr;
577 0 : memcpy(desc, &batch->user_desc[0], sizeof(*desc));
578 0 : desc->completion_addr = completion_addr;
579 0 : op->cb_fn = batch->user_ops[0].cb_fn;
580 0 : op->cb_arg = batch->user_ops[0].cb_arg;
581 0 : op->crc_dst = batch->user_ops[0].crc_dst;
582 0 : _free_batch(batch, chan);
583 : } else {
584 : /* Command specific. */
585 0 : desc->opcode = IDXD_OPCODE_BATCH;
586 0 : desc->desc_list_addr = batch->user_desc_addr;
587 0 : desc->desc_count = batch->index;
588 0 : assert(batch->index <= batch->size);
589 :
590 : /* Add the batch elements' completion contexts to the outstanding list to be polled. */
591 0 : for (i = 0 ; i < batch->index; i++) {
592 0 : batch->refcnt++;
593 0 : STAILQ_INSERT_TAIL(&chan->ops_outstanding, (struct idxd_ops *)&batch->user_ops[i],
594 : link);
595 : }
596 0 : batch->index = UINT16_MAX;
597 : }
598 :
599 0 : chan->batch = NULL;
600 :
601 : /* Submit operation. */
602 0 : _submit_to_hw(chan, op);
603 0 : SPDK_DEBUGLOG(idxd, "Submitted batch %p\n", batch);
604 :
605 0 : return 0;
606 : }
607 :
608 : static int
609 0 : _idxd_setup_batch(struct spdk_idxd_io_channel *chan)
610 : {
611 : struct idxd_batch *batch;
612 :
613 0 : if (chan->batch == NULL) {
614 0 : batch = idxd_batch_create(chan);
615 0 : if (batch == NULL) {
616 0 : return -EBUSY;
617 : }
618 : }
619 :
620 0 : return 0;
621 : }
622 :
623 : static int
624 0 : _idxd_flush_batch(struct spdk_idxd_io_channel *chan)
625 : {
626 0 : struct idxd_batch *batch = chan->batch;
627 : int rc;
628 :
629 0 : if (batch != NULL && batch->index >= IDXD_MIN_BATCH_FLUSH) {
630 : /* Close out the full batch */
631 0 : rc = idxd_batch_submit(chan, NULL, NULL);
632 0 : if (rc) {
633 0 : assert(rc == -EBUSY);
634 : /*
635 : * Return 0. The batch will be re-submitted from spdk_idxd_process_events(),
636 : * where a repeated failure is handled and the batch correctly aborted.
637 : */
638 0 : return 0;
639 : }
640 : }
641 :
642 0 : return 0;
643 : }
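
/*
 * Note for callers: the submission paths below accumulate descriptors into
 * chan->batch and only submit once IDXD_MIN_BATCH_FLUSH entries are queued;
 * a smaller batch is submitted lazily from spdk_idxd_process_events(). For
 * example, a caller that queues four small copies and then stops submitting
 * will not see their completions until the next poll.
 */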
644 :
645 : static inline void
646 0 : _update_write_flags(struct spdk_idxd_io_channel *chan, struct idxd_hw_desc *desc)
647 : {
648 0 : desc->flags ^= IDXD_FLAG_CACHE_CONTROL;
649 0 : }
650 :
651 : int
652 0 : spdk_idxd_submit_copy(struct spdk_idxd_io_channel *chan,
653 : struct iovec *diov, uint32_t diovcnt,
654 : struct iovec *siov, uint32_t siovcnt,
655 : int flags, spdk_idxd_req_cb cb_fn, void *cb_arg)
656 : {
657 0 : struct idxd_hw_desc *desc;
658 0 : struct idxd_ops *first_op, *op;
659 0 : void *src, *dst;
660 0 : uint64_t src_addr, dst_addr;
661 : int rc, count;
662 : uint64_t len, seg_len;
663 0 : struct spdk_ioviter iter;
664 0 : struct idxd_vtophys_iter vtophys_iter;
665 :
666 0 : assert(chan != NULL);
667 0 : assert(diov != NULL);
668 0 : assert(siov != NULL);
669 :
670 0 : rc = _idxd_setup_batch(chan);
671 0 : if (rc) {
672 0 : return rc;
673 : }
674 :
675 0 : count = 0;
676 0 : first_op = NULL;
677 0 : for (len = spdk_ioviter_first(&iter, siov, siovcnt, diov, diovcnt, &src, &dst);
678 0 : len > 0;
679 0 : len = spdk_ioviter_next(&iter, &src, &dst)) {
680 :
681 0 : idxd_vtophys_iter_init(chan, &vtophys_iter, src, dst, len);
682 :
683 0 : while (len > 0) {
684 0 : if (first_op == NULL) {
685 0 : rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, flags, &desc, &op);
686 0 : if (rc) {
687 0 : goto error;
688 : }
689 :
690 0 : first_op = op;
691 : } else {
692 0 : rc = _idxd_prep_batch_cmd(chan, NULL, NULL, flags, &desc, &op);
693 0 : if (rc) {
694 0 : goto error;
695 : }
696 :
697 0 : first_op->count++;
698 0 : op->parent = first_op;
699 : }
700 :
701 0 : count++;
702 :
703 0 : src_addr = 0;
704 0 : dst_addr = 0;
705 0 : seg_len = idxd_vtophys_iter_next(&vtophys_iter, &src_addr, &dst_addr);
706 0 : if (seg_len == SPDK_VTOPHYS_ERROR) {
707 0 : rc = -EFAULT;
708 0 : goto error;
709 : }
710 :
711 0 : desc->opcode = IDXD_OPCODE_MEMMOVE;
712 0 : desc->src_addr = src_addr;
713 0 : desc->dst_addr = dst_addr;
714 0 : desc->xfer_size = seg_len;
715 0 : _update_write_flags(chan, desc);
716 :
717 0 : len -= seg_len;
718 : }
719 : }
720 :
721 0 : return _idxd_flush_batch(chan);
722 :
723 0 : error:
724 0 : chan->batch->index -= count;
725 0 : return rc;
726 : }
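
/*
 * End-to-end usage sketch for a copy, with error handling elided. The names
 * my_done_cb, my_ctx, and done are hypothetical; an spdk_idxd_req_cb receives
 * (cb_arg, status):
 *
 *	struct spdk_idxd_io_channel *chan = spdk_idxd_get_channel(idxd);
 *	struct iovec siov = { .iov_base = src_buf, .iov_len = buf_len };
 *	struct iovec diov = { .iov_base = dst_buf, .iov_len = buf_len };
 *
 *	rc = spdk_idxd_submit_copy(chan, &diov, 1, &siov, 1, 0, my_done_cb, my_ctx);
 *	if (rc == -EBUSY) {
 *		// flow control: poll completions, then retry the submission
 *	}
 *	while (!done) {
 *		spdk_idxd_process_events(chan);
 *	}
 *	spdk_idxd_put_channel(chan);
 */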
727 :
728 : /* Dual-cast copies the same source to two separate destination buffers. */
729 : int
730 0 : spdk_idxd_submit_dualcast(struct spdk_idxd_io_channel *chan, void *dst1, void *dst2,
731 : const void *src, uint64_t nbytes, int flags,
732 : spdk_idxd_req_cb cb_fn, void *cb_arg)
733 : {
734 0 : struct idxd_hw_desc *desc;
735 0 : struct idxd_ops *first_op, *op;
736 0 : uint64_t src_addr, dst1_addr, dst2_addr;
737 : int rc, count;
738 : uint64_t len;
739 : uint64_t outer_seg_len, inner_seg_len;
740 0 : struct idxd_vtophys_iter iter_outer, iter_inner;
741 :
742 0 : assert(chan != NULL);
743 0 : assert(dst1 != NULL);
744 0 : assert(dst2 != NULL);
745 0 : assert(src != NULL);
746 :
747 0 : if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
748 0 : SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
749 0 : return -EINVAL;
750 : }
751 :
752 0 : rc = _idxd_setup_batch(chan);
753 0 : if (rc) {
754 0 : return rc;
755 : }
756 :
757 0 : idxd_vtophys_iter_init(chan, &iter_outer, src, dst1, nbytes);
758 :
759 0 : first_op = NULL;
760 0 : count = 0;
761 0 : while (nbytes > 0) {
762 0 : src_addr = 0;
763 0 : dst1_addr = 0;
764 0 : outer_seg_len = idxd_vtophys_iter_next(&iter_outer, &src_addr, &dst1_addr);
765 0 : if (outer_seg_len == SPDK_VTOPHYS_ERROR) {
766 0 : goto error;
767 : }
768 :
769 0 : idxd_vtophys_iter_init(chan, &iter_inner, src, dst2, nbytes);
770 :
771 0 : src += outer_seg_len;
772 0 : nbytes -= outer_seg_len;
773 :
774 0 : while (outer_seg_len > 0) {
775 0 : if (first_op == NULL) {
776 0 : rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, flags, &desc, &op);
777 0 : if (rc) {
778 0 : goto error;
779 : }
780 :
781 0 : first_op = op;
782 : } else {
783 0 : rc = _idxd_prep_batch_cmd(chan, NULL, NULL, flags, &desc, &op);
784 0 : if (rc) {
785 0 : goto error;
786 : }
787 :
788 0 : first_op->count++;
789 0 : op->parent = first_op;
790 : }
791 :
792 0 : count++;
793 :
794 0 : src_addr = 0;
795 0 : dst2_addr = 0;
796 0 : inner_seg_len = idxd_vtophys_iter_next(&iter_inner, &src_addr, &dst2_addr);
797 0 : if (inner_seg_len == SPDK_VTOPHYS_ERROR) {
798 0 : rc = -EFAULT;
799 0 : goto error;
800 : }
801 :
802 0 : len = spdk_min(outer_seg_len, inner_seg_len);
803 :
804 : /* Command specific. */
805 0 : desc->opcode = IDXD_OPCODE_DUALCAST;
806 0 : desc->src_addr = src_addr;
807 0 : desc->dst_addr = dst1_addr;
808 0 : desc->dest2 = dst2_addr;
809 0 : desc->xfer_size = len;
810 0 : _update_write_flags(chan, desc);
811 :
812 0 : dst1_addr += len;
813 0 : outer_seg_len -= len;
814 : }
815 : }
816 :
817 0 : return _idxd_flush_batch(chan);
818 :
819 0 : error:
820 0 : chan->batch->index -= count;
821 0 : return rc;
822 : }
823 :
824 : int
825 0 : spdk_idxd_submit_compare(struct spdk_idxd_io_channel *chan,
826 : struct iovec *siov1, size_t siov1cnt,
827 : struct iovec *siov2, size_t siov2cnt,
828 : int flags, spdk_idxd_req_cb cb_fn, void *cb_arg)
829 : {
830 :
831 0 : struct idxd_hw_desc *desc;
832 0 : struct idxd_ops *first_op, *op;
833 0 : void *src1, *src2;
834 0 : uint64_t src1_addr, src2_addr;
835 : int rc, count;
836 : uint64_t len, seg_len;
837 0 : struct spdk_ioviter iter;
838 0 : struct idxd_vtophys_iter vtophys_iter;
839 :
840 0 : assert(chan != NULL);
841 0 : assert(siov1 != NULL);
842 0 : assert(siov2 != NULL);
843 :
844 0 : rc = _idxd_setup_batch(chan);
845 0 : if (rc) {
846 0 : return rc;
847 : }
848 :
849 0 : count = 0;
850 0 : first_op = NULL;
851 0 : for (len = spdk_ioviter_first(&iter, siov1, siov1cnt, siov2, siov2cnt, &src1, &src2);
852 0 : len > 0;
853 0 : len = spdk_ioviter_next(&iter, &src1, &src2)) {
854 :
855 0 : idxd_vtophys_iter_init(chan, &vtophys_iter, src1, src2, len);
856 :
857 0 : while (len > 0) {
858 0 : if (first_op == NULL) {
859 0 : rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, flags, &desc, &op);
860 0 : if (rc) {
861 0 : goto error;
862 : }
863 :
864 0 : first_op = op;
865 : } else {
866 0 : rc = _idxd_prep_batch_cmd(chan, NULL, NULL, flags, &desc, &op);
867 0 : if (rc) {
868 0 : goto error;
869 : }
870 :
871 0 : first_op->count++;
872 0 : op->parent = first_op;
873 : }
874 :
875 0 : count++;
876 :
877 0 : src1_addr = 0;
878 0 : src2_addr = 0;
879 0 : seg_len = idxd_vtophys_iter_next(&vtophys_iter, &src1_addr, &src2_addr);
880 0 : if (seg_len == SPDK_VTOPHYS_ERROR) {
881 0 : rc = -EFAULT;
882 0 : goto error;
883 : }
884 :
885 0 : desc->opcode = IDXD_OPCODE_COMPARE;
886 0 : desc->src_addr = src1_addr;
887 0 : desc->src2_addr = src2_addr;
888 0 : desc->xfer_size = seg_len;
889 :
890 0 : len -= seg_len;
891 : }
892 : }
893 :
894 0 : return _idxd_flush_batch(chan);
895 :
896 0 : error:
897 0 : chan->batch->index -= count;
898 0 : return rc;
899 : }
900 :
901 : int
902 0 : spdk_idxd_submit_fill(struct spdk_idxd_io_channel *chan,
903 : struct iovec *diov, size_t diovcnt,
904 : uint64_t fill_pattern, int flags,
905 : spdk_idxd_req_cb cb_fn, void *cb_arg)
906 : {
907 0 : struct idxd_hw_desc *desc;
908 0 : struct idxd_ops *first_op, *op;
909 : uint64_t dst_addr;
910 : int rc, count;
911 0 : uint64_t len, seg_len;
912 : void *dst;
913 : size_t i;
914 :
915 0 : assert(chan != NULL);
916 0 : assert(diov != NULL);
917 :
918 0 : rc = _idxd_setup_batch(chan);
919 0 : if (rc) {
920 0 : return rc;
921 : }
922 :
923 0 : count = 0;
924 0 : first_op = NULL;
925 0 : for (i = 0; i < diovcnt; i++) {
926 0 : len = diov[i].iov_len;
927 0 : dst = diov[i].iov_base;
928 :
929 0 : while (len > 0) {
930 0 : if (first_op == NULL) {
931 0 : rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, flags, &desc, &op);
932 0 : if (rc) {
933 0 : goto error;
934 : }
935 :
936 0 : first_op = op;
937 : } else {
938 0 : rc = _idxd_prep_batch_cmd(chan, NULL, NULL, flags, &desc, &op);
939 0 : if (rc) {
940 0 : goto error;
941 : }
942 :
943 0 : first_op->count++;
944 0 : op->parent = first_op;
945 : }
946 :
947 0 : count++;
948 :
949 0 : seg_len = len;
950 0 : if (chan->pasid_enabled) {
951 0 : dst_addr = (uint64_t)dst;
952 : } else {
953 0 : dst_addr = spdk_vtophys(dst, &seg_len);
954 0 : if (dst_addr == SPDK_VTOPHYS_ERROR) {
955 0 : SPDK_ERRLOG("Error translating address\n");
956 0 : rc = -EFAULT;
957 0 : goto error;
958 : }
959 : }
960 :
961 0 : seg_len = spdk_min(seg_len, len);
962 :
963 0 : desc->opcode = IDXD_OPCODE_MEMFILL;
964 0 : desc->pattern = fill_pattern;
965 0 : desc->dst_addr = dst_addr;
966 0 : desc->xfer_size = seg_len;
967 0 : _update_write_flags(chan, desc);
968 :
969 0 : len -= seg_len;
970 0 : dst += seg_len;
971 : }
972 : }
973 :
974 0 : return _idxd_flush_batch(chan);
975 :
976 0 : error:
977 0 : chan->batch->index -= count;
978 0 : return rc;
979 : }
980 :
981 : int
982 0 : spdk_idxd_submit_crc32c(struct spdk_idxd_io_channel *chan,
983 : struct iovec *siov, size_t siovcnt,
984 : uint32_t seed, uint32_t *crc_dst, int flags,
985 : spdk_idxd_req_cb cb_fn, void *cb_arg)
986 : {
987 0 : struct idxd_hw_desc *desc;
988 0 : struct idxd_ops *first_op, *op;
989 : uint64_t src_addr;
990 : int rc, count;
991 0 : uint64_t len, seg_len;
992 : void *src;
993 : size_t i;
994 0 : uint64_t prev_crc = 0;
995 :
996 0 : assert(chan != NULL);
997 0 : assert(siov != NULL);
998 :
999 0 : rc = _idxd_setup_batch(chan);
1000 0 : if (rc) {
1001 0 : return rc;
1002 : }
1003 :
1004 0 : count = 0;
1005 0 : op = NULL;
1006 0 : first_op = NULL;
1007 0 : for (i = 0; i < siovcnt; i++) {
1008 0 : len = siov[i].iov_len;
1009 0 : src = siov[i].iov_base;
1010 :
1011 0 : while (len > 0) {
1012 0 : if (first_op == NULL) {
1013 0 : rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, flags, &desc, &op);
1014 0 : if (rc) {
1015 0 : goto error;
1016 : }
1017 :
1018 0 : first_op = op;
1019 : } else {
1020 0 : rc = _idxd_prep_batch_cmd(chan, NULL, NULL, flags, &desc, &op);
1021 0 : if (rc) {
1022 0 : goto error;
1023 : }
1024 :
1025 0 : first_op->count++;
1026 0 : op->parent = first_op;
1027 : }
1028 :
1029 0 : count++;
1030 :
1031 0 : seg_len = len;
1032 0 : if (chan->pasid_enabled) {
1033 0 : src_addr = (uint64_t)src;
1034 : } else {
1035 0 : src_addr = spdk_vtophys(src, &seg_len);
1036 0 : if (src_addr == SPDK_VTOPHYS_ERROR) {
1037 0 : SPDK_ERRLOG("Error translating address\n");
1038 0 : rc = -EFAULT;
1039 0 : goto error;
1040 : }
1041 : }
1042 :
1043 0 : seg_len = spdk_min(seg_len, len);
1044 :
1045 0 : desc->opcode = IDXD_OPCODE_CRC32C_GEN;
1046 0 : desc->src_addr = src_addr;
1047 0 : if (op == first_op) {
1048 0 : desc->crc32c.seed = seed;
1049 : } else {
1050 0 : desc->flags |= IDXD_FLAG_FENCE | IDXD_FLAG_CRC_READ_CRC_SEED;
1051 0 : desc->crc32c.addr = prev_crc;
1052 : }
1053 :
1054 0 : desc->xfer_size = seg_len;
1055 0 : prev_crc = desc->completion_addr + offsetof(struct dsa_hw_comp_record, crc32c_val);
1056 :
1057 0 : len -= seg_len;
1058 0 : src += seg_len;
1059 : }
1060 : }
1061 :
1062 : /* Only the last op copies the crc to the destination */
1063 0 : if (op) {
1064 0 : op->crc_dst = crc_dst;
1065 : }
1066 :
1067 0 : return _idxd_flush_batch(chan);
1068 :
1069 0 : error:
1070 0 : chan->batch->index -= count;
1071 0 : return rc;
1072 : }
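
/*
 * Usage sketch: the computed CRC is stored to *crc_dst from the completion
 * path in spdk_idxd_process_events() (note the final inversion there), so
 * crc_dst must remain valid until the callback fires. seed is the caller's
 * initial CRC value; my_done_cb and my_ctx are hypothetical:
 *
 *	uint32_t crc;
 *	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
 *
 *	rc = spdk_idxd_submit_crc32c(chan, &iov, 1, seed, &crc, 0, my_done_cb, my_ctx);
 */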
1073 :
1074 : int
1075 0 : spdk_idxd_submit_copy_crc32c(struct spdk_idxd_io_channel *chan,
1076 : struct iovec *diov, size_t diovcnt,
1077 : struct iovec *siov, size_t siovcnt,
1078 : uint32_t seed, uint32_t *crc_dst, int flags,
1079 : spdk_idxd_req_cb cb_fn, void *cb_arg)
1080 : {
1081 0 : struct idxd_hw_desc *desc;
1082 0 : struct idxd_ops *first_op, *op;
1083 0 : void *src, *dst;
1084 0 : uint64_t src_addr, dst_addr;
1085 : int rc, count;
1086 : uint64_t len, seg_len;
1087 0 : struct spdk_ioviter iter;
1088 0 : struct idxd_vtophys_iter vtophys_iter;
1089 0 : uint64_t prev_crc = 0;
1090 :
1091 0 : assert(chan != NULL);
1092 0 : assert(diov != NULL);
1093 0 : assert(siov != NULL);
1094 :
1095 0 : rc = _idxd_setup_batch(chan);
1096 0 : if (rc) {
1097 0 : return rc;
1098 : }
1099 :
1100 0 : count = 0;
1101 0 : op = NULL;
1102 0 : first_op = NULL;
1103 0 : for (len = spdk_ioviter_first(&iter, siov, siovcnt, diov, diovcnt, &src, &dst);
1104 0 : len > 0;
1105 0 : len = spdk_ioviter_next(&iter, &src, &dst)) {
1106 :
1107 :
1108 0 : idxd_vtophys_iter_init(chan, &vtophys_iter, src, dst, len);
1109 :
1110 0 : while (len > 0) {
1111 0 : if (first_op == NULL) {
1112 0 : rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, flags, &desc, &op);
1113 0 : if (rc) {
1114 0 : goto error;
1115 : }
1116 :
1117 0 : first_op = op;
1118 : } else {
1119 0 : rc = _idxd_prep_batch_cmd(chan, NULL, NULL, flags, &desc, &op);
1120 0 : if (rc) {
1121 0 : goto error;
1122 : }
1123 :
1124 0 : first_op->count++;
1125 0 : op->parent = first_op;
1126 : }
1127 :
1128 0 : count++;
1129 :
1130 0 : src_addr = 0;
1131 0 : dst_addr = 0;
1132 0 : seg_len = idxd_vtophys_iter_next(&vtophys_iter, &src_addr, &dst_addr);
1133 0 : if (seg_len == SPDK_VTOPHYS_ERROR) {
1134 0 : rc = -EFAULT;
1135 0 : goto error;
1136 : }
1137 :
1138 0 : desc->opcode = IDXD_OPCODE_COPY_CRC;
1139 0 : desc->dst_addr = dst_addr;
1140 0 : desc->src_addr = src_addr;
1141 0 : _update_write_flags(chan, desc);
1142 0 : if (op == first_op) {
1143 0 : desc->crc32c.seed = seed;
1144 : } else {
1145 0 : desc->flags |= IDXD_FLAG_FENCE | IDXD_FLAG_CRC_READ_CRC_SEED;
1146 0 : desc->crc32c.addr = prev_crc;
1147 : }
1148 :
1149 0 : desc->xfer_size = seg_len;
1150 0 : prev_crc = desc->completion_addr + offsetof(struct dsa_hw_comp_record, crc32c_val);
1151 :
1152 0 : len -= seg_len;
1153 : }
1154 : }
1155 :
1156 : /* Only the last op copies the crc to the destination */
1157 0 : if (op) {
1158 0 : op->crc_dst = crc_dst;
1159 : }
1160 :
1161 0 : return _idxd_flush_batch(chan);
1162 :
1163 0 : error:
1164 0 : chan->batch->index -= count;
1165 0 : return rc;
1166 : }
1167 :
1168 : static inline int
1169 0 : _idxd_submit_compress_single(struct spdk_idxd_io_channel *chan, void *dst, const void *src,
1170 : uint64_t nbytes_dst, uint64_t nbytes_src, uint32_t *output_size,
1171 : int flags, spdk_idxd_req_cb cb_fn, void *cb_arg)
1172 : {
1173 0 : struct idxd_hw_desc *desc;
1174 0 : struct idxd_ops *op;
1175 0 : uint64_t src_addr, dst_addr;
1176 : int rc;
1177 :
1178 : /* Common prep. */
1179 0 : rc = _idxd_prep_command(chan, cb_fn, cb_arg, flags, &desc, &op);
1180 0 : if (rc) {
1181 0 : return rc;
1182 : }
1183 :
1184 0 : rc = _vtophys(chan, src, &src_addr, nbytes_src);
1185 0 : if (rc) {
1186 0 : goto error;
1187 : }
1188 :
1189 0 : rc = _vtophys(chan, dst, &dst_addr, nbytes_dst);
1190 0 : if (rc) {
1191 0 : goto error;
1192 : }
1193 :
1194 : /* Command specific. */
1195 0 : desc->opcode = IDXD_OPCODE_COMPRESS;
1196 0 : desc->src1_addr = src_addr;
1197 0 : desc->dst_addr = dst_addr;
1198 0 : desc->src1_size = nbytes_src;
1199 0 : desc->iaa.max_dst_size = nbytes_dst;
1200 0 : desc->iaa.src2_size = sizeof(struct iaa_aecs);
1201 0 : desc->iaa.src2_addr = chan->idxd->aecs_addr;
1202 0 : desc->flags |= IAA_FLAG_RD_SRC2_AECS;
1203 0 : desc->compr_flags = IAA_COMP_FLAGS;
1204 0 : op->output_size = output_size;
1205 :
1206 0 : _submit_to_hw(chan, op);
1207 0 : return 0;
1208 0 : error:
1209 0 : STAILQ_INSERT_TAIL(&chan->ops_pool, op, link);
1210 0 : return rc;
1211 : }
1212 :
1213 : int
1214 0 : spdk_idxd_submit_compress(struct spdk_idxd_io_channel *chan,
1215 : void *dst, uint64_t nbytes,
1216 : struct iovec *siov, uint32_t siovcnt, uint32_t *output_size,
1217 : int flags, spdk_idxd_req_cb cb_fn, void *cb_arg)
1218 : {
1219 0 : assert(chan != NULL);
1220 0 : assert(dst != NULL);
1221 0 : assert(siov != NULL);
1222 :
1223 0 : if (siovcnt == 1) {
1224 : /* Simple case - copying one buffer to another */
1225 0 : if (nbytes < siov[0].iov_len) {
1226 0 : return -EINVAL;
1227 : }
1228 :
1229 0 : return _idxd_submit_compress_single(chan, dst, siov[0].iov_base,
1230 : nbytes, siov[0].iov_len,
1231 : output_size, flags, cb_fn, cb_arg);
1232 : }
1233 : /* TODO: vectored support */
1234 0 : return -EINVAL;
1235 : }
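
/*
 * Usage sketch (IAA devices; a single source vector only, per the TODO
 * above). dst_len must be at least siov[0].iov_len, and on completion
 * compressed_size holds the number of output bytes actually written;
 * my_done_cb and my_ctx are hypothetical:
 *
 *	uint32_t compressed_size;
 *	struct iovec siov = { .iov_base = src_buf, .iov_len = src_len };
 *
 *	rc = spdk_idxd_submit_compress(chan, dst_buf, dst_len, &siov, 1,
 *				       &compressed_size, 0, my_done_cb, my_ctx);
 */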
1236 :
1237 : static inline int
1238 0 : _idxd_submit_decompress_single(struct spdk_idxd_io_channel *chan, void *dst, const void *src,
1239 : uint64_t nbytes_dst, uint64_t nbytes, int flags, spdk_idxd_req_cb cb_fn, void *cb_arg)
1240 : {
1241 0 : struct idxd_hw_desc *desc;
1242 0 : struct idxd_ops *op;
1243 0 : uint64_t src_addr, dst_addr;
1244 : int rc;
1245 :
1246 : /* Common prep. */
1247 0 : rc = _idxd_prep_command(chan, cb_fn, cb_arg, flags, &desc, &op);
1248 0 : if (rc) {
1249 0 : return rc;
1250 : }
1251 :
1252 0 : rc = _vtophys(chan, src, &src_addr, nbytes);
1253 0 : if (rc) {
1254 0 : goto error;
1255 : }
1256 :
1257 0 : rc = _vtophys(chan, dst, &dst_addr, nbytes_dst);
1258 0 : if (rc) {
1259 0 : goto error;
1260 : }
1261 :
1262 : /* Command specific. */
1263 0 : desc->opcode = IDXD_OPCODE_DECOMPRESS;
1264 0 : desc->src1_addr = src_addr;
1265 0 : desc->dst_addr = dst_addr;
1266 0 : desc->src1_size = nbytes;
1267 0 : desc->iaa.max_dst_size = nbytes_dst;
1268 0 : desc->decompr_flags = IAA_DECOMP_FLAGS;
1269 :
1270 0 : _submit_to_hw(chan, op);
1271 0 : return 0;
1272 0 : error:
1273 0 : STAILQ_INSERT_TAIL(&chan->ops_pool, op, link);
1274 0 : return rc;
1275 : }
1276 :
1277 : int
1278 0 : spdk_idxd_submit_decompress(struct spdk_idxd_io_channel *chan,
1279 : struct iovec *diov, uint32_t diovcnt,
1280 : struct iovec *siov, uint32_t siovcnt,
1281 : int flags, spdk_idxd_req_cb cb_fn, void *cb_arg)
1282 : {
1283 0 : assert(chan != NULL);
1284 0 : assert(diov != NULL);
1285 0 : assert(siov != NULL);
1286 :
1287 0 : if (diovcnt == 1 && siovcnt == 1) {
1288 : /* Simple case - copying one buffer to another */
1289 0 : if (diov[0].iov_len < siov[0].iov_len) {
1290 0 : return -EINVAL;
1291 : }
1292 :
1293 0 : return _idxd_submit_decompress_single(chan, diov[0].iov_base, siov[0].iov_base,
1294 : diov[0].iov_len, siov[0].iov_len,
1295 : flags, cb_fn, cb_arg);
1296 : }
1297 : /* TODO: vectored support */
1298 0 : return -EINVAL;
1299 : }
1300 :
1301 : static inline int
1302 0 : idxd_get_dif_flags(const struct spdk_dif_ctx *ctx, uint8_t *flags)
1303 : {
1304 0 : if (flags == NULL) {
1305 0 : SPDK_ERRLOG("Flag should be non-null");
1306 0 : return -EINVAL;
1307 : }
1308 :
1309 0 : switch (ctx->guard_interval) {
1310 0 : case DATA_BLOCK_SIZE_512:
1311 0 : *flags = IDXD_DIF_FLAG_DIF_BLOCK_SIZE_512;
1312 0 : break;
1313 0 : case DATA_BLOCK_SIZE_520:
1314 0 : *flags = IDXD_DIF_FLAG_DIF_BLOCK_SIZE_520;
1315 0 : break;
1316 0 : case DATA_BLOCK_SIZE_4096:
1317 0 : *flags = IDXD_DIF_FLAG_DIF_BLOCK_SIZE_4096;
1318 0 : break;
1319 0 : case DATA_BLOCK_SIZE_4104:
1320 0 : *flags = IDXD_DIF_FLAG_DIF_BLOCK_SIZE_4104;
1321 0 : break;
1322 0 : default:
1323 0 : SPDK_ERRLOG("Invalid DIF block size %d\n", ctx->block_size - ctx->md_size);
1324 0 : return -EINVAL;
1325 : }
1326 :
1327 0 : return 0;
1328 : }
1329 :
1330 : static inline int
1331 0 : idxd_get_source_dif_flags(const struct spdk_dif_ctx *ctx, uint8_t *flags)
1332 : {
1333 0 : if (flags == NULL) {
1334 0 : SPDK_ERRLOG("Flag should be non-null");
1335 0 : return -EINVAL;
1336 : }
1337 :
1338 0 : *flags = 0;
1339 :
1340 0 : if (!(ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK)) {
1341 0 : *flags |= IDXD_DIF_SOURCE_FLAG_GUARD_CHECK_DISABLE;
1342 : }
1343 :
1344 0 : if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
1345 0 : *flags |= IDXD_DIF_SOURCE_FLAG_REF_TAG_CHECK_DISABLE;
1346 : }
1347 :
1348 0 : switch (ctx->dif_type) {
1349 0 : case SPDK_DIF_TYPE1:
1350 : case SPDK_DIF_TYPE2:
1351 : /* If Type 1 or 2 is used, then all DIF checks are disabled when
1352 : * the Application Tag is 0xFFFF.
1353 : */
1354 0 : *flags |= IDXD_DIF_SOURCE_FLAG_APP_TAG_F_DETECT;
1355 0 : break;
1356 0 : case SPDK_DIF_TYPE3:
1357 : /* If Type 3 is used, then all DIF checks are disabled when the
1358 : * Application Tag is 0xFFFF and the Reference Tag is 0xFFFFFFFF
1359 : * (for PI 8 bytes format).
1360 : */
1361 0 : *flags |= IDXD_DIF_SOURCE_FLAG_APP_AND_REF_TAG_F_DETECT;
1362 0 : break;
1363 0 : default:
1364 0 : SPDK_ERRLOG("Invalid DIF type %d\n", ctx->dif_type);
1365 0 : return -EINVAL;
1366 : }
1367 :
1368 0 : return 0;
1369 : }
1370 :
1371 : static inline int
1372 0 : idxd_get_app_tag_mask(const struct spdk_dif_ctx *ctx, uint16_t *app_tag_mask)
1373 : {
1374 0 : if (!(ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK)) {
1375 : /* The Source Application Tag Mask may be set to 0xffff
1376 : * to disable application tag checking */
1377 0 : *app_tag_mask = 0xFFFF;
1378 : } else {
1379 0 : *app_tag_mask = ~ctx->apptag_mask;
1380 : }
1381 :
1382 0 : return 0;
1383 : }
1384 :
1385 : static inline int
1386 0 : idxd_validate_dif_common_params(const struct spdk_dif_ctx *ctx)
1387 : {
1388 : /* Check byte offset from the start of the whole data buffer */
1389 0 : if (ctx->data_offset != 0) {
1390 0 : SPDK_ERRLOG("Byte offset from the start of the whole data buffer must be set to 0.");
1391 0 : return -EINVAL;
1392 : }
1393 :
1394 : /* Check seed value for guard computation */
1395 0 : if (ctx->guard_seed != 0) {
1396 0 : SPDK_ERRLOG("Seed value for guard computation must be set to 0.");
1397 0 : return -EINVAL;
1398 : }
1399 :
1400 : /* Check for supported metadata sizes */
1401 0 : if (ctx->md_size != METADATA_SIZE_8 && ctx->md_size != METADATA_SIZE_16) {
1402 0 : SPDK_ERRLOG("Metadata size %d is not supported.\n", ctx->md_size);
1403 0 : return -EINVAL;
1404 : }
1405 :
1406 : /* Check for supported DIF PI formats */
1407 0 : if (ctx->dif_pi_format != SPDK_DIF_PI_FORMAT_16) {
1408 0 : SPDK_ERRLOG("DIF PI format %d is not supported.\n", ctx->dif_pi_format);
1409 0 : return -EINVAL;
1410 : }
1411 :
1412 : /* Check for supported metadata locations */
1413 0 : if (ctx->md_interleave == false) {
1414 0 : SPDK_ERRLOG("Separated metadata location is not supported.\n");
1415 0 : return -EINVAL;
1416 : }
1417 :
1418 : /* Check for supported DIF alignments */
1419 0 : if (ctx->md_size == METADATA_SIZE_16 &&
1420 0 : (ctx->guard_interval == DATA_BLOCK_SIZE_512 ||
1421 0 : ctx->guard_interval == DATA_BLOCK_SIZE_4096)) {
1422 0 : SPDK_ERRLOG("DIF left alignment in metadata is not supported.\n");
1423 0 : return -EINVAL;
1424 : }
1425 :
1426 : /* Check for supported DIF block sizes */
1427 0 : if ((ctx->block_size - ctx->md_size) != DATA_BLOCK_SIZE_512 &&
1428 0 : (ctx->block_size - ctx->md_size) != DATA_BLOCK_SIZE_4096) {
1429 0 : SPDK_ERRLOG("DIF block size %d is not supported.\n", ctx->block_size - ctx->md_size);
1430 0 : return -EINVAL;
1431 : }
1432 :
1433 0 : return 0;
1434 : }
1435 :
1436 : static inline int
1437 0 : idxd_validate_dif_check_params(const struct spdk_dif_ctx *ctx)
1438 : {
1439 : /* Validate common parameters */
1440 0 : int rc = idxd_validate_dif_common_params(ctx);
1441 0 : if (rc) {
1442 0 : return rc;
1443 : }
1444 :
1445 0 : return 0;
1446 : }
1447 :
1448 : static inline int
1449 0 : idxd_validate_dif_check_buf_align(const struct spdk_dif_ctx *ctx, const uint64_t len)
1450 : {
1451 : /* DSA can only process contiguous memory buffers that are a multiple of the block size */
1452 0 : if (len % ctx->block_size != 0) {
1453 0 : SPDK_ERRLOG("The memory buffer length (%ld) is not a multiple of block size with metadata (%d).\n",
1454 : len, ctx->block_size);
1455 0 : return -EINVAL;
1456 : }
1457 :
1458 0 : return 0;
1459 : }
1460 :
1461 : int
1462 0 : spdk_idxd_submit_dif_check(struct spdk_idxd_io_channel *chan,
1463 : struct iovec *siov, size_t siovcnt,
1464 : uint32_t num_blocks, const struct spdk_dif_ctx *ctx, int flags,
1465 : spdk_idxd_req_cb cb_fn, void *cb_arg)
1466 : {
1467 0 : struct idxd_hw_desc *desc;
1468 0 : struct idxd_ops *first_op = NULL, *op = NULL;
1469 : uint64_t src_seg_addr, src_seg_len;
1470 0 : uint32_t num_blocks_done = 0;
1471 0 : uint8_t dif_flags = 0, src_dif_flags = 0;
1472 0 : uint16_t app_tag_mask = 0;
1473 0 : int rc, count = 0;
1474 : size_t i;
1475 :
1476 0 : assert(ctx != NULL);
1477 0 : assert(chan != NULL);
1478 0 : assert(siov != NULL);
1479 :
1480 : /* Validate DIF check parameters */
1481 0 : rc = idxd_validate_dif_check_params(ctx);
1482 0 : if (rc) {
1483 0 : return rc;
1484 : }
1485 :
1486 : /* Get DIF flags */
1487 0 : rc = idxd_get_dif_flags(ctx, &dif_flags);
1488 0 : if (rc) {
1489 0 : return rc;
1490 : }
1491 :
1492 : /* Get source DIF flags */
1493 0 : rc = idxd_get_source_dif_flags(ctx, &src_dif_flags);
1494 0 : if (rc) {
1495 0 : return rc;
1496 : }
1497 :
1498 : /* Get AppTag Mask */
1499 0 : rc = idxd_get_app_tag_mask(ctx, &app_tag_mask);
1500 0 : if (rc) {
1501 0 : return rc;
1502 : }
1503 :
1504 0 : rc = _idxd_setup_batch(chan);
1505 0 : if (rc) {
1506 0 : return rc;
1507 : }
1508 :
1509 0 : for (i = 0; i < siovcnt; i++) {
1510 0 : src_seg_addr = (uint64_t)siov[i].iov_base;
1511 0 : src_seg_len = siov[i].iov_len;
1512 :
1513 : /* DSA processes the iovec buffers independently, so the buffers cannot
1514 : * be split (each must be a multiple of the block size) */
1515 :
1516 : /* Validate the memory buffer alignment */
1517 0 : rc = idxd_validate_dif_check_buf_align(ctx, src_seg_len);
1518 0 : if (rc) {
1519 0 : goto error;
1520 : }
1521 :
1522 0 : if (first_op == NULL) {
1523 0 : rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, flags, &desc, &op);
1524 0 : if (rc) {
1525 0 : goto error;
1526 : }
1527 :
1528 0 : first_op = op;
1529 : } else {
1530 0 : rc = _idxd_prep_batch_cmd(chan, NULL, NULL, flags, &desc, &op);
1531 0 : if (rc) {
1532 0 : goto error;
1533 : }
1534 :
1535 0 : first_op->count++;
1536 0 : op->parent = first_op;
1537 : }
1538 :
1539 0 : count++;
1540 :
1541 0 : desc->opcode = IDXD_OPCODE_DIF_CHECK;
1542 0 : desc->src_addr = src_seg_addr;
1543 0 : desc->xfer_size = src_seg_len;
1544 0 : desc->dif_chk.flags = dif_flags;
1545 0 : desc->dif_chk.src_flags = src_dif_flags;
1546 0 : desc->dif_chk.app_tag_seed = ctx->app_tag;
1547 0 : desc->dif_chk.app_tag_mask = app_tag_mask;
1548 0 : desc->dif_chk.ref_tag_seed = (uint32_t)ctx->init_ref_tag + num_blocks_done;
1549 :
1550 0 : num_blocks_done += (src_seg_len / ctx->block_size);
1551 : }
1552 :
1553 0 : return _idxd_flush_batch(chan);
1554 :
1555 0 : error:
1556 0 : chan->batch->index -= count;
1557 0 : return rc;
1558 : }
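
/*
 * Caller notes: the spdk_dif_ctx must satisfy idxd_validate_dif_common_params()
 * above (interleaved 8- or 16-byte metadata, SPDK_DIF_PI_FORMAT_16, data_offset
 * and guard_seed of 0, 512- or 4096-byte data blocks), and every iovec element
 * must cover a whole number of blocks, since DSA cannot split a block across
 * descriptors.
 */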
1559 :
1560 : static inline int
1561 0 : idxd_validate_dif_insert_params(const struct spdk_dif_ctx *ctx)
1562 : {
1563 : /* Validate common parameters */
1564 0 : int rc = idxd_validate_dif_common_params(ctx);
1565 0 : if (rc) {
1566 0 : return rc;
1567 : }
1568 :
1569 : /* Check for required DIF flags */
1570 0 : if (!(ctx->dif_flags & SPDK_DIF_FLAGS_GUARD_CHECK)) {
1571 0 : SPDK_ERRLOG("Guard check flag must be set.\n");
1572 0 : return -EINVAL;
1573 : }
1574 :
1575 0 : if (!(ctx->dif_flags & SPDK_DIF_FLAGS_APPTAG_CHECK)) {
1576 0 : SPDK_ERRLOG("Application Tag check flag must be set.\n");
1577 0 : return -EINVAL;
1578 : }
1579 :
1580 0 : if (!(ctx->dif_flags & SPDK_DIF_FLAGS_REFTAG_CHECK)) {
1581 0 : SPDK_ERRLOG("Reference Tag check flag must be set.\n");
1582 0 : return -EINVAL;
1583 : }
1584 :
1585 0 : return 0;
1586 : }
1587 :
1588 : static inline int
1589 0 : idxd_validate_dif_insert_iovecs(const struct spdk_dif_ctx *ctx,
1590 : const struct iovec *diov, const size_t diovcnt,
1591 : const struct iovec *siov, const size_t siovcnt)
1592 : {
1593 : size_t src_len, dst_len;
1594 : uint32_t num_blocks;
1595 : size_t i;
1596 :
1597 0 : if (diovcnt != siovcnt) {
1598 0 : SPDK_ERRLOG("Invalid number of elements in src (%ld) and dst (%ld) iovecs.\n",
1599 : siovcnt, diovcnt);
1600 0 : return -EINVAL;
1601 : }
1602 :
1603 0 : for (i = 0; i < siovcnt; i++) {
1604 0 : src_len = siov[i].iov_len;
1605 0 : dst_len = diov[i].iov_len;
1606 0 : num_blocks = src_len / (ctx->block_size - ctx->md_size);
1607 0 : if (src_len != dst_len - num_blocks * ctx->md_size) {
1608 0 : SPDK_ERRLOG("Invalid length of data in src (%ld) and dst (%ld) in iovecs[%ld].\n",
1609 : src_len, dst_len, i);
1610 0 : return -EINVAL;
1611 : }
1612 : }
1613 :
1614 0 : return 0;
1615 : }
1616 :
1617 : static inline int
1618 0 : idxd_validate_dif_insert_buf_align(const struct spdk_dif_ctx *ctx,
1619 : const uint64_t src_len, const uint64_t dst_len)
1620 : {
1621 : /* DSA can only process contiguous memory buffers that are a multiple of the block size */
1622 0 : if (src_len % (ctx->block_size - ctx->md_size) != 0) {
1623 0 : SPDK_ERRLOG("The memory source buffer length (%ld) is not a multiple of block size without metadata (%d).\n",
1624 : src_len, ctx->block_size - ctx->md_size);
1625 0 : return -EINVAL;
1626 : }
1627 :
1628 0 : if (dst_len % ctx->block_size != 0) {
1629 0 : SPDK_ERRLOG("The memory destination buffer length (%ld) is not a multiple of block size with metadata (%d).\n",
1630 : dst_len, ctx->block_size);
1631 0 : return -EINVAL;
1632 : }
1633 :
1634 : /* The memory source and destination must hold the same number of blocks. */
1635 0 : if (src_len / (ctx->block_size - ctx->md_size) != (dst_len / ctx->block_size)) {
1636 0 : SPDK_ERRLOG("The memory source (%ld) and destiantion (%ld) must hold the same number of blocks.\n",
1637 : src_len / (ctx->block_size - ctx->md_size), (dst_len / ctx->block_size));
1638 0 : return -EINVAL;
1639 : }
1640 :
1641 0 : return 0;
1642 : }
1643 :
1644 : int
1645 0 : spdk_idxd_submit_dif_insert(struct spdk_idxd_io_channel *chan,
1646 : struct iovec *diov, size_t diovcnt,
1647 : struct iovec *siov, size_t siovcnt,
1648 : uint32_t num_blocks, const struct spdk_dif_ctx *ctx, int flags,
1649 : spdk_idxd_req_cb cb_fn, void *cb_arg)
1650 : {
1651 0 : struct idxd_hw_desc *desc;
1652 0 : struct idxd_ops *first_op = NULL, *op = NULL;
1653 : uint64_t src_seg_addr, src_seg_len;
1654 : uint64_t dst_seg_addr, dst_seg_len;
1655 0 : uint32_t num_blocks_done = 0;
1656 0 : uint8_t dif_flags = 0;
1657 0 : int rc, count = 0;
1658 : size_t i;
1659 :
1660 0 : assert(ctx != NULL);
1661 0 : assert(chan != NULL);
1662 0 : assert(siov != NULL);
1663 :
1664 : /* Validate DIF parameters */
1665 0 : rc = idxd_validate_dif_insert_params(ctx);
1666 0 : if (rc) {
1667 0 : return rc;
1668 : }
1669 :
1670 : /* Validate DIF iovec parameters */
1671 0 : rc = idxd_validate_dif_insert_iovecs(ctx, diov, diovcnt, siov, siovcnt);
1672 0 : if (rc) {
1673 0 : return rc;
1674 : }
1675 :
1676 : /* Set DIF flags */
1677 0 : rc = idxd_get_dif_flags(ctx, &dif_flags);
1678 0 : if (rc) {
1679 0 : return rc;
1680 : }
1681 :
1682 0 : rc = _idxd_setup_batch(chan);
1683 0 : if (rc) {
1684 0 : return rc;
1685 : }
1686 :
1687 0 : for (i = 0; i < siovcnt; i++) {
1688 0 : src_seg_addr = (uint64_t)siov[i].iov_base;
1689 0 : src_seg_len = siov[i].iov_len;
1690 0 : dst_seg_addr = (uint64_t)diov[i].iov_base;
1691 0 : dst_seg_len = diov[i].iov_len;
1692 :
1693 : /* DSA processes the iovec buffers independently, so the buffers cannot
1694 : * be split (each must be a multiple of the block size). The destination
1695 : * size must equal the source size plus the metadata size. */
1696 :
1697 : /* Validate the memory buffer alignment */
1698 0 : rc = idxd_validate_dif_insert_buf_align(ctx, src_seg_len, dst_seg_len);
1699 0 : if (rc) {
1700 0 : goto error;
1701 : }
1702 :
1703 0 : if (first_op == NULL) {
1704 0 : rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, flags, &desc, &op);
1705 0 : if (rc) {
1706 0 : goto error;
1707 : }
1708 :
1709 0 : first_op = op;
1710 : } else {
1711 0 : rc = _idxd_prep_batch_cmd(chan, NULL, NULL, flags, &desc, &op);
1712 0 : if (rc) {
1713 0 : goto error;
1714 : }
1715 :
1716 0 : first_op->count++;
1717 0 : op->parent = first_op;
1718 : }
1719 :
1720 0 : count++;
1721 :
1722 0 : desc->opcode = IDXD_OPCODE_DIF_INS;
1723 0 : desc->src_addr = src_seg_addr;
1724 0 : desc->dst_addr = dst_seg_addr;
1725 0 : desc->xfer_size = src_seg_len;
1726 0 : desc->dif_ins.flags = dif_flags;
1727 0 : desc->dif_ins.app_tag_seed = ctx->app_tag;
1728 0 : desc->dif_ins.app_tag_mask = ~ctx->apptag_mask;
1729 0 : desc->dif_ins.ref_tag_seed = (uint32_t)ctx->init_ref_tag + num_blocks_done;
1730 :
1731 0 : num_blocks_done += src_seg_len / (ctx->block_size - ctx->md_size);
1732 : }
1733 :
1734 0 : return _idxd_flush_batch(chan);
1735 :
1736 0 : error:
1737 0 : chan->batch->index -= count;
1738 0 : return rc;
1739 : }
1740 :
1741 : int
1742 0 : spdk_idxd_submit_raw_desc(struct spdk_idxd_io_channel *chan,
1743 : struct idxd_hw_desc *_desc,
1744 : spdk_idxd_req_cb cb_fn, void *cb_arg)
1745 : {
1746 0 : struct idxd_hw_desc *desc;
1747 0 : struct idxd_ops *op;
1748 0 : int rc, flags = 0;
1749 : uint64_t comp_addr;
1750 :
1751 0 : assert(chan != NULL);
1752 0 : assert(_desc != NULL);
1753 :
1754 : /* Common prep. */
1755 0 : rc = _idxd_prep_command(chan, cb_fn, cb_arg, flags, &desc, &op);
1756 0 : if (rc) {
1757 0 : return rc;
1758 : }
1759 :
1760 : /* Command specific. */
1761 0 : flags = desc->flags;
1762 0 : comp_addr = desc->completion_addr;
1763 0 : memcpy(desc, _desc, sizeof(*desc));
1764 0 : desc->flags |= flags;
1765 0 : desc->completion_addr = comp_addr;
1766 :
1767 : /* Submit operation. */
1768 0 : _submit_to_hw(chan, op);
1769 :
1770 0 : return 0;
1771 : }
1772 :
1773 : static inline void
1774 0 : _dump_sw_error_reg(struct spdk_idxd_io_channel *chan)
1775 : {
1776 0 : struct spdk_idxd_device *idxd = chan->idxd;
1777 :
1778 0 : assert(idxd != NULL);
1779 0 : idxd->impl->dump_sw_error(idxd, chan->portal);
1780 0 : }
1781 :
1782 : /* TODO: more performance experiments. */
1783 : #define IDXD_COMPLETION(x) ((x) > (0) ? (1) : (0))
1784 : #define IDXD_FAILURE(x) ((x) > (1) ? (1) : (0))
1785 : #define IDXD_SW_ERROR(x) (((x) & (0x1)) ? (1) : (0))
1786 : int
1787 0 : spdk_idxd_process_events(struct spdk_idxd_io_channel *chan)
1788 : {
1789 : struct idxd_ops *op, *tmp, *parent_op;
1790 0 : int status = 0;
1791 0 : int rc2, rc = 0;
1792 : void *cb_arg;
1793 : spdk_idxd_req_cb cb_fn;
1794 :
1795 0 : assert(chan != NULL);
1796 :
1797 0 : STAILQ_FOREACH_SAFE(op, &chan->ops_outstanding, link, tmp) {
1798 0 : if (!IDXD_COMPLETION(op->hw.status)) {
1799 : /*
1800 : * oldest locations are at the head of the list so if
1801 : * we've polled a location that hasn't completed, bail
1802 : * now as there are unlikely to be any more completions.
1803 : */
1804 0 : break;
1805 : }
1806 :
1807 0 : STAILQ_REMOVE_HEAD(&chan->ops_outstanding, link);
1808 0 : rc++;
1809 :
1810 : /* Status is in the same location for both IAA and DSA completion records. */
1811 0 : if (spdk_unlikely(IDXD_FAILURE(op->hw.status))) {
1812 0 : SPDK_ERRLOG("Completion status 0x%x\n", op->hw.status);
1813 0 : status = -EINVAL;
1814 0 : _dump_sw_error_reg(chan);
1815 : }
1816 :
1817 0 : switch (op->desc->opcode) {
1818 0 : case IDXD_OPCODE_BATCH:
1819 0 : SPDK_DEBUGLOG(idxd, "Complete batch %p\n", op->batch);
1820 0 : break;
1821 0 : case IDXD_OPCODE_CRC32C_GEN:
1822 : case IDXD_OPCODE_COPY_CRC:
1823 0 : if (spdk_likely(status == 0 && op->crc_dst != NULL)) {
1824 0 : *op->crc_dst = op->hw.crc32c_val;
1825 0 : *op->crc_dst ^= ~0;
1826 : }
1827 0 : break;
1828 0 : case IDXD_OPCODE_COMPARE:
1829 0 : if (spdk_likely(status == 0)) {
1830 0 : status = op->hw.result;
1831 : }
1832 0 : break;
1833 0 : case IDXD_OPCODE_COMPRESS:
1834 0 : if (spdk_likely(status == 0 && op->output_size != NULL)) {
1835 0 : *op->output_size = op->iaa_hw.output_size;
1836 : }
1837 0 : break;
1838 0 : case IDXD_OPCODE_DIF_CHECK:
1839 0 : if (spdk_unlikely(op->hw.status == IDXD_DSA_STATUS_DIF_ERROR)) {
1840 0 : status = -EIO;
1841 : }
1842 0 : break;
1843 : }
1844 :
1845 : /* TODO: WHAT IF THIS FAILED!? */
1846 0 : op->hw.status = 0;
1847 :
1848 0 : assert(op->count > 0);
1849 0 : op->count--;
1850 :
1851 0 : parent_op = op->parent;
1852 0 : if (parent_op != NULL) {
1853 0 : assert(parent_op->count > 0);
1854 0 : parent_op->count--;
1855 :
1856 0 : if (parent_op->count == 0) {
1857 0 : cb_fn = parent_op->cb_fn;
1858 0 : cb_arg = parent_op->cb_arg;
1859 :
1860 0 : assert(parent_op->batch != NULL);
1861 :
1862 : /*
1863 : * Now that parent_op count is 0, we can release its ref
1864 : * to its batch. We have not released the ref to the batch
1865 : * that the op is pointing to yet, which will be done below.
1866 : */
1867 0 : parent_op->batch->refcnt--;
1868 0 : if (parent_op->batch->refcnt == 0) {
1869 0 : _free_batch(parent_op->batch, chan);
1870 : }
1871 :
1872 0 : if (cb_fn) {
1873 0 : cb_fn(cb_arg, status);
1874 : }
1875 : }
1876 : }
1877 :
1878 0 : if (op->count == 0) {
1879 0 : cb_fn = op->cb_fn;
1880 0 : cb_arg = op->cb_arg;
1881 :
1882 0 : if (op->batch != NULL) {
1883 0 : assert(op->batch->refcnt > 0);
1884 0 : op->batch->refcnt--;
1885 :
1886 0 : if (op->batch->refcnt == 0) {
1887 0 : _free_batch(op->batch, chan);
1888 : }
1889 : } else {
1890 0 : STAILQ_INSERT_HEAD(&chan->ops_pool, op, link);
1891 : }
1892 :
1893 0 : if (cb_fn) {
1894 0 : cb_fn(cb_arg, status);
1895 : }
1896 : }
1897 :
1898 : /* reset the status */
1899 0 : status = 0;
1900 : /* Break out of the processing loop to avoid starving the rest of the system. */
1901 0 : if (rc > IDXD_MAX_COMPLETIONS) {
1902 0 : break;
1903 : }
1904 : }
1905 :
1906 : /* Submit any built-up batch */
1907 0 : if (chan->batch) {
1908 0 : rc2 = idxd_batch_submit(chan, NULL, NULL);
1909 0 : if (rc2) {
1910 0 : assert(rc2 == -EBUSY);
1911 : }
1912 : }
1913 :
1914 0 : return rc;
1915 : }
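
/*
 * A typical poller sketch; my_idxd_poll is a hypothetical name and would
 * normally be registered with spdk_poller_register():
 *
 *	static int
 *	my_idxd_poll(void *arg)
 *	{
 *		struct spdk_idxd_io_channel *chan = arg;
 *
 *		return spdk_idxd_process_events(chan) > 0 ?
 *		       SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
 *	}
 */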
1916 :
1917 : void
1918 0 : idxd_impl_register(struct spdk_idxd_impl *impl)
1919 : {
1920 0 : STAILQ_INSERT_HEAD(&g_idxd_impls, impl, link);
1921 0 : }
1922 :
1923 0 : SPDK_LOG_REGISTER_COMPONENT(idxd)