LCOV - code coverage report
Current view: top level - module/accel/iaa - accel_iaa.c
Test: ut_cov_unit.info          Date: 2024-11-05 10:06:02
Lines: 0 / 184 (0.0 %)          Functions: 0 / 21 (0.0 %)

/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

#include "accel_iaa.h"

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"
#include "spdk/log.h"
#include "spdk_internal/idxd.h"

#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/thread.h"
#include "spdk/idxd.h"
#include "spdk/util.h"
#include "spdk/json.h"
#include "spdk/trace.h"
#include "spdk_internal/trace_defs.h"

static bool g_iaa_enable = false;

enum channel_state {
	IDXD_CHANNEL_ACTIVE,
	IDXD_CHANNEL_ERROR,
};

static bool g_iaa_initialized = false;

struct idxd_device {
	struct spdk_idxd_device		*iaa;
	TAILQ_ENTRY(idxd_device)	tailq;
};

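/*
 * Global IAA device state. The list is populated by attach_cb() during
 * spdk_idxd_probe(); g_next_dev is the round-robin cursor used by
 * idxd_select_device() and is protected by g_dev_lock, since channels on
 * any thread may be selecting a device concurrently.
 */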
static TAILQ_HEAD(, idxd_device) g_iaa_devices = TAILQ_HEAD_INITIALIZER(g_iaa_devices);
static struct idxd_device *g_next_dev = NULL;
static uint32_t g_num_devices = 0;
static pthread_mutex_t g_dev_lock = PTHREAD_MUTEX_INITIALIZER;

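/*
 * Per-task context. The accel framework allocates accel_iaa_get_ctx_size()
 * bytes for each task, so an spdk_accel_task handed to this module can be
 * converted back to its enclosing idxd_task with SPDK_CONTAINEROF(), as
 * _process_single_task() does.
 */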
struct idxd_task {
	struct spdk_accel_task	task;
	struct idxd_io_channel	*chan;
};

struct idxd_io_channel {
	struct spdk_idxd_io_channel	*chan;
	struct idxd_device		*dev;
	enum channel_state		state;
	struct spdk_poller		*poller;
	uint32_t			num_outstanding;
	STAILQ_HEAD(, spdk_accel_task)	queued_tasks;
};

static struct spdk_io_channel *iaa_get_io_channel(void);

static struct idxd_device *
idxd_select_device(struct idxd_io_channel *chan)
{
	uint32_t count = 0;
	struct idxd_device *dev;
	uint32_t numa_id = spdk_env_get_numa_id(spdk_env_get_current_core());

	/*
	 * We allow channels to share underlying devices. Selection is
	 * round-robin, with a limit on how many channels can share one
	 * device.
	 */
	do {
		/* select next device */
		pthread_mutex_lock(&g_dev_lock);
		g_next_dev = TAILQ_NEXT(g_next_dev, tailq);
		if (g_next_dev == NULL) {
			g_next_dev = TAILQ_FIRST(&g_iaa_devices);
		}
		dev = g_next_dev;
		pthread_mutex_unlock(&g_dev_lock);

		if (numa_id != spdk_idxd_get_socket(dev->iaa)) {
			continue;
		}

		/*
		 * Now see if a channel is available on this one. We only
		 * allow a specific number of channels to share a device
		 * to limit outstanding IO for flow control purposes.
		 */
		chan->chan = spdk_idxd_get_channel(dev->iaa);
		if (chan->chan != NULL) {
			SPDK_DEBUGLOG(accel_iaa, "On socket %d using device on numa %d\n",
				      numa_id, spdk_idxd_get_socket(dev->iaa));
			return dev;
		}
	} while (++count < g_num_devices);

	/* We are out of available channels and/or devices on the local socket. We fix the
	 * number of channels that we allocate per device and only use devices on the same
	 * socket as the current thread. On a 2-socket system it may be possible to avoid
	 * this situation by spreading threads across both sockets.
	 */
	SPDK_ERRLOG("No more IAA devices available on the local socket.\n");
	return NULL;
}
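/*
 * Illustration (hypothetical device layout): with devices D0..D3 attached and
 * only D1 and D3 on the caller's NUMA node, successive calls hand out channels
 * from D1, D3, D1, ... until spdk_idxd_get_channel() returns NULL for both,
 * at which point the loop gives up after g_num_devices iterations.
 */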

static void
iaa_done(void *cb_arg, int status)
{
	struct idxd_task *idxd_task = cb_arg;
	struct idxd_io_channel *chan;

	chan = idxd_task->chan;

	assert(chan->num_outstanding > 0);
	spdk_trace_record(TRACE_ACCEL_IAA_OP_COMPLETE, 0, 0, 0, chan->num_outstanding - 1);
	chan->num_outstanding--;

	spdk_accel_task_complete(&idxd_task->task, status);
}

static int
_process_single_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
{
	struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
	struct idxd_task *idxd_task;
	int rc = 0;
	int flags = 0;

	idxd_task = SPDK_CONTAINEROF(task, struct idxd_task, task);
	idxd_task->chan = chan;

	/* TODO: iovec support */
	if (task->d.iovcnt > 1 || task->s.iovcnt > 1) {
		SPDK_ERRLOG("fatal: IAA does not support > 1 iovec\n");
		assert(0);
		/* Don't fall through and submit anyway in release builds,
		 * where assert() is compiled out. */
		return -EINVAL;
	}

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COMPRESS:
		rc = spdk_idxd_submit_compress(chan->chan, task->d.iovs[0].iov_base, task->d.iovs[0].iov_len,
					       task->s.iovs, task->s.iovcnt, task->output_size, flags,
					       iaa_done, idxd_task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
		rc = spdk_idxd_submit_decompress(chan->chan, task->d.iovs, task->d.iovcnt, task->s.iovs,
						 task->s.iovcnt, flags, iaa_done, idxd_task);
		break;
	default:
		assert(false);
		rc = -EINVAL;
		break;
	}

	if (rc == 0) {
		chan->num_outstanding++;
		spdk_trace_record(TRACE_ACCEL_IAA_OP_SUBMIT, 0, 0, 0, chan->num_outstanding);
	}

	return rc;
}
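/*
 * Flow-control accounting: num_outstanding is incremented on each successful
 * submission above and decremented in iaa_done(); the paired
 * TRACE_ACCEL_IAA_OP_SUBMIT/COMPLETE trace points record its value, so the
 * device queue depth over time can be reconstructed from a trace.
 */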

static int
iaa_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *first_task)
{
	struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task, *tmp;
	int rc = 0;

	task = first_task;

	if (chan->state == IDXD_CHANNEL_ERROR) {
		while (task) {
			tmp = STAILQ_NEXT(task, link);
			spdk_accel_task_complete(task, -EINVAL);
			task = tmp;
		}
		return 0;
	}

	if (!STAILQ_EMPTY(&chan->queued_tasks)) {
		goto queue_tasks;
	}

	/* The caller either submits a single task or a group of tasks linked
	 * together via their STAILQ entries, but the tasks must not remain on a
	 * list. For example, in idxd_poll() the list of queued tasks is
	 * reinitialized after saving off the first task, which is then passed in
	 * here. The accel framework does the same.
	 */
	while (task) {
		tmp = STAILQ_NEXT(task, link);
		rc = _process_single_task(ch, task);

		if (rc == -EBUSY) {
			goto queue_tasks;
		} else if (rc) {
			spdk_accel_task_complete(task, rc);
		}
		task = tmp;
	}

	return 0;

queue_tasks:
	while (task != NULL) {
		tmp = STAILQ_NEXT(task, link);
		STAILQ_INSERT_TAIL(&chan->queued_tasks, task, link);
		task = tmp;
	}
	return 0;
}
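/*
 * Usage sketch (hypothetical caller, outside this module): applications never
 * call iaa_submit_tasks() directly; they submit through the accel framework,
 * which routes tasks here via g_iaa_module. Roughly (the exact signature
 * varies across SPDK versions; see spdk/accel.h):
 *
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *
 *	rc = spdk_accel_submit_decompress(ch, dst_iovs, dst_iovcnt,
 *					  src_iovs, src_iovcnt,
 *					  decompress_done_cb, cb_arg);
 *
 * A task that fails above with -EBUSY is parked on queued_tasks and
 * resubmitted from idxd_poll() once the device drains.
 */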

static int
idxd_poll(void *arg)
{
	struct idxd_io_channel *chan = arg;
	struct spdk_accel_task *task = NULL;
	struct idxd_task *idxd_task;
	int count;

	count = spdk_idxd_process_events(chan->chan);

	/* Check if there are any pending ops to process if the channel is active */
	if (chan->state == IDXD_CHANNEL_ACTIVE) {
		/* Submit queued tasks */
		if (!STAILQ_EMPTY(&chan->queued_tasks)) {
			task = STAILQ_FIRST(&chan->queued_tasks);
			idxd_task = SPDK_CONTAINEROF(task, struct idxd_task, task);

			STAILQ_INIT(&chan->queued_tasks);

			iaa_submit_tasks(spdk_io_channel_from_ctx(idxd_task->chan), task);
		}
	}

	return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}
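/*
 * Note: the STAILQ_INIT() before resubmission is what upholds the "tasks
 * cannot be on a list" contract described in iaa_submit_tasks(); the saved
 * chain is then walked through the tasks' own STAILQ_NEXT pointers. Returning
 * SPDK_POLLER_BUSY only when completions were reaped lets SPDK account the
 * reactor as busy versus idle.
 */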

static size_t
accel_iaa_get_ctx_size(void)
{
	return sizeof(struct idxd_task);
}

static bool
iaa_supports_opcode(enum spdk_accel_opcode opc)
{
	if (!g_iaa_initialized) {
		return false;
	}

	switch (opc) {
	case SPDK_ACCEL_OPC_COMPRESS:
	case SPDK_ACCEL_OPC_DECOMPRESS:
		return true;
	default:
		return false;
	}
}

static bool
iaa_compress_supports_algo(enum spdk_accel_comp_algo algo)
{
	if (algo == SPDK_ACCEL_COMP_ALGO_DEFLATE) {
		return true;
	}

	return false;
}

static int
iaa_get_compress_level_range(enum spdk_accel_comp_algo algo,
			     uint32_t *min_level, uint32_t *max_level)
{
	switch (algo) {
	case SPDK_ACCEL_COMP_ALGO_DEFLATE:
		*min_level = 0;
		*max_level = 0;
		return 0;
	default:
		return -EINVAL;
	}
}

static int accel_iaa_init(void);
static void accel_iaa_exit(void *ctx);
static void accel_iaa_write_config_json(struct spdk_json_write_ctx *w);

static struct spdk_accel_module_if g_iaa_module = {
	.module_init			= accel_iaa_init,
	.module_fini			= accel_iaa_exit,
	.write_config_json		= accel_iaa_write_config_json,
	.get_ctx_size			= accel_iaa_get_ctx_size,
	.name				= "iaa",
	.supports_opcode		= iaa_supports_opcode,
	.get_io_channel			= iaa_get_io_channel,
	.submit_tasks			= iaa_submit_tasks,
	.compress_supports_algo		= iaa_compress_supports_algo,
	.get_compress_level_range	= iaa_get_compress_level_range,
};
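/*
 * Module lifecycle: accel_iaa_enable_probe() registers g_iaa_module with the
 * accel framework, which then drives it through this interface: module_init
 * at subsystem start, get_io_channel once per thread that performs accel I/O,
 * submit_tasks per submission, and module_fini at shutdown.
 */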

static int
idxd_create_cb(void *io_device, void *ctx_buf)
{
	struct idxd_io_channel *chan = ctx_buf;
	struct idxd_device *iaa;

	iaa = idxd_select_device(chan);
	if (iaa == NULL) {
		SPDK_ERRLOG("Failed to get an idxd channel\n");
		return -EINVAL;
	}

	chan->dev = iaa;
	chan->poller = SPDK_POLLER_REGISTER(idxd_poll, chan, 0);
	STAILQ_INIT(&chan->queued_tasks);
	chan->num_outstanding = 0;
	chan->state = IDXD_CHANNEL_ACTIVE;

	return 0;
}
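/*
 * idxd_create_cb/idxd_destroy_cb run once per thread: spdk_get_io_channel()
 * in iaa_get_io_channel() creates the per-thread idxd_io_channel the first
 * time a thread asks for one (see the spdk_io_device_register() call in
 * accel_iaa_init()), and registering the poller with period 0 makes
 * idxd_poll() run on every reactor iteration for that thread.
 */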

static void
idxd_destroy_cb(void *io_device, void *ctx_buf)
{
	struct idxd_io_channel *chan = ctx_buf;

	spdk_poller_unregister(&chan->poller);
	spdk_idxd_put_channel(chan->chan);
}

static struct spdk_io_channel *
iaa_get_io_channel(void)
{
	return spdk_get_io_channel(&g_iaa_module);
}

static void
attach_cb(void *cb_ctx, struct spdk_idxd_device *iaa)
{
	struct idxd_device *dev;

	dev = calloc(1, sizeof(*dev));
	if (dev == NULL) {
		SPDK_ERRLOG("Failed to allocate device struct\n");
		return;
	}

	dev->iaa = iaa;
	if (g_next_dev == NULL) {
		g_next_dev = dev;
	}

	TAILQ_INSERT_TAIL(&g_iaa_devices, dev, tailq);
	g_num_devices++;
}

int
accel_iaa_enable_probe(void)
{
	int rc;

	if (g_iaa_enable) {
		return -EALREADY;
	}

	/* TODO initially only support user mode w/IAA */
	rc = spdk_idxd_set_config(false);
	if (rc != 0) {
		return rc;
	}

	spdk_accel_module_list_add(&g_iaa_module);
	g_iaa_enable = true;

	return 0;
}
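/*
 * This is the entry point that turns the module on; it is typically reached
 * via the "iaa_scan_accel_module" RPC (the same method name that
 * accel_iaa_write_config_json() persists below). spdk_idxd_set_config(false)
 * selects the user-space idxd driver rather than the kernel one, matching the
 * TODO above.
 */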

static bool
caller_probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
{
	if (dev->id.device_id == PCI_DEVICE_ID_INTEL_IAA) {
		return true;
	}

	return false;
}

static int
accel_iaa_init(void)
{
	if (!g_iaa_enable) {
		assert(0);
		return -EINVAL;
	}

	if (spdk_idxd_probe(NULL, attach_cb, caller_probe_cb) != 0) {
		SPDK_ERRLOG("spdk_idxd_probe() failed\n");
		return -EINVAL;
	}

	if (TAILQ_EMPTY(&g_iaa_devices)) {
		return -ENODEV;
	}

	g_iaa_initialized = true;
	spdk_io_device_register(&g_iaa_module, idxd_create_cb, idxd_destroy_cb,
				sizeof(struct idxd_io_channel), "iaa_accel_module");
	return 0;
}
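/*
 * caller_probe_cb() restricts spdk_idxd_probe() to PCI functions whose device
 * ID matches IAA, so on a machine that only has DSA devices the probe attaches
 * nothing and init fails with -ENODEV.
 */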

static void
accel_iaa_exit(void *ctx)
{
	struct idxd_device *dev;

	if (g_iaa_initialized) {
		spdk_io_device_unregister(&g_iaa_module, NULL);
		g_iaa_initialized = false;
	}

	while (!TAILQ_EMPTY(&g_iaa_devices)) {
		dev = TAILQ_FIRST(&g_iaa_devices);
		TAILQ_REMOVE(&g_iaa_devices, dev, tailq);
		spdk_idxd_detach(dev->iaa);
		free(dev);
	}

	spdk_accel_module_finish();
}

static void
accel_iaa_write_config_json(struct spdk_json_write_ctx *w)
{
	if (g_iaa_enable) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "iaa_scan_accel_module");
		spdk_json_write_object_end(w);
	}
}
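/*
 * The emitted JSON is what a saved configuration replays at startup: the
 * "iaa_scan_accel_module" method maps back to accel_iaa_enable_probe(), so
 * restoring the config re-enables this module automatically.
 */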

static void
iaa_trace(void)
{
	spdk_trace_register_description("IAA_OP_SUBMIT", TRACE_ACCEL_IAA_OP_SUBMIT, OWNER_TYPE_NONE,
					OBJECT_NONE, 0, SPDK_TRACE_ARG_TYPE_INT, "count");
	spdk_trace_register_description("IAA_OP_COMPLETE", TRACE_ACCEL_IAA_OP_COMPLETE, OWNER_TYPE_NONE,
					OBJECT_NONE, 0, SPDK_TRACE_ARG_TYPE_INT, "count");
}
SPDK_TRACE_REGISTER_FN(iaa_trace, "iaa", TRACE_GROUP_ACCEL_IAA)
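/*
 * The descriptions registered above label the TRACE_ACCEL_IAA_OP_SUBMIT and
 * TRACE_ACCEL_IAA_OP_COMPLETE points recorded in _process_single_task() and
 * iaa_done(); with tracing enabled, the "count" argument (num_outstanding)
 * can be inspected with SPDK's trace tooling.
 */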

SPDK_LOG_REGISTER_COMPONENT(accel_iaa)

Generated by: LCOV version 1.15