LCOV - code coverage report
Current view: top level - lib/nvme - nvme_pcie_internal.h (source / functions) Hit Total Coverage
Test: ut_cov_unit.info Lines: 6 44 13.6 %
Date: 2024-07-12 15:43:25 Functions: 3 6 50.0 %

          Line data    Source code
       1             : /*   SPDX-License-Identifier: BSD-3-Clause
       2             :  *   Copyright (C) 2021 Intel Corporation. All rights reserved.
       3             :  *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
       4             :  */
       5             : 
       6             : #ifndef __NVME_PCIE_INTERNAL_H__
       7             : #define __NVME_PCIE_INTERNAL_H__
       8             : 
/*
 * Number of completion queue entries to process before ringing the
 *  completion queue doorbell.
 */
#define NVME_MIN_COMPLETIONS    (1)
#define NVME_MAX_COMPLETIONS    (128)

/*
 * NVME_MAX_SGL_DESCRIPTORS defines the maximum number of descriptors in one SGL
 *  segment.
 */
#define NVME_MAX_SGL_DESCRIPTORS        (250)

/*
 * Maximum entries in one PRP list.  Sized so that the prp[] array keeps
 * struct nvme_tracker at exactly 4 KiB (enforced by the static assert
 * following struct nvme_tracker below).
 */
#define NVME_MAX_PRP_LIST_ENTRIES       (503)

/* Minimum admin queue size */
#define NVME_PCIE_MIN_ADMIN_QUEUE_SIZE  (256)
      26             : 
/* PCIe transport extensions for spdk_nvme_ctrlr */
struct nvme_pcie_ctrlr {
	/* Embedded base controller; nvme_pcie_ctrlr() below recovers this
	 * struct from a struct spdk_nvme_ctrlr pointer via SPDK_CONTAINEROF.
	 */
	struct spdk_nvme_ctrlr ctrlr;

	/** NVMe MMIO register space */
	volatile struct spdk_nvme_registers *regs;

	/** NVMe MMIO register size */
	uint64_t regs_size;

	/* Controller Memory Buffer (CMB) mapping state */
	struct {
		/* BAR mapping address which contains controller memory buffer */
		void *bar_va;

		/* BAR physical address which contains controller memory buffer */
		uint64_t bar_pa;

		/* Controller memory buffer size in Bytes */
		uint64_t size;

		/* Current offset of controller memory buffer, relative to start of BAR virt addr */
		uint64_t current_offset;

		/* Sub-range of the CMB currently registered, if any.
		 * NOTE(review): registration semantics live in the .c file — confirm there.
		 */
		void *mem_register_addr;
		size_t mem_register_size;
	} cmb;

	/* Persistent Memory Region (PMR) mapping state */
	struct {
		/* BAR mapping address which contains persistent memory region */
		void *bar_va;

		/* BAR physical address which contains persistent memory region */
		uint64_t bar_pa;

		/* Persistent memory region size in Bytes */
		uint64_t size;

		/* Sub-range of the PMR currently registered, if any.
		 * NOTE(review): registration semantics live in the .c file — confirm there.
		 */
		void *mem_register_addr;
		size_t mem_register_size;
	} pmr;

	/** stride in uint32_t units between doorbell registers (1 = 4 bytes, 2 = 8 bytes, ...) */
	uint32_t doorbell_stride_u32;

	/* Opaque handle to associated PCI device. */
	struct spdk_pci_device *devhandle;

	/* Flag to indicate the MMIO register has been remapped */
	bool is_remapped;

	/* Base of the doorbell registers in MMIO space; per-doorbell spacing
	 * is given by doorbell_stride_u32 above.
	 */
	volatile uint32_t *doorbell_base;
};
      79             : 
/*
 * Per-thread pointer to the controller whose MMIO space the current thread is
 * accessing; set around the spdk_mmio_write_4() calls in the doorbell helpers
 * below.  NOTE(review): presumably consulted by a fault handler so MMIO access
 * to a removed device can be survived — confirm against the .c file.
 */
extern __thread struct nvme_pcie_ctrlr *g_thread_mmio_ctrlr;
      81             : 
/*
 * Per-command tracking structure: pairs an outstanding nvme_request with its
 * command ID and holds the PRP list / SGL segment memory for that command.
 */
struct nvme_tracker {
	/* Link on nvme_pcie_qpair::free_tr or ::outstanding_tr */
	TAILQ_ENTRY(nvme_tracker)       tq_list;

	/* Request bound to this tracker */
	struct nvme_request             *req;
	/* NVMe command identifier; trackers are indexed by cid in
	 * nvme_pcie_qpair::tr.
	 */
	uint16_t                        cid;

	uint16_t                        bad_vtophys : 1; /* virtual-to-physical translation failed for this command */
	uint16_t                        rsvd0 : 15;
	uint32_t                        rsvd1;

	/* Completion callback and its argument */
	spdk_nvme_cmd_cb                cb_fn;
	void                            *cb_arg;

	/* Bus (physical) address of the u.prp / u.sgl storage below */
	uint64_t                        prp_sgl_bus_addr;

	/* Don't move, metadata SGL is always contiguous with Data Block SGL */
	struct spdk_nvme_sgl_descriptor         meta_sgl;
	union {
		uint64_t                        prp[NVME_MAX_PRP_LIST_ENTRIES];
		struct spdk_nvme_sgl_descriptor sgl[NVME_MAX_SGL_DESCRIPTORS];
	} u;
};
/*
 * struct nvme_tracker must be exactly 4K so that the prp[] array does not cross a page boundary
 * and so that there is no padding required to meet alignment requirements.
 */
SPDK_STATIC_ASSERT(sizeof(struct nvme_tracker) == 4096, "nvme_tracker is not 4K");
SPDK_STATIC_ASSERT((offsetof(struct nvme_tracker, u.sgl) & 7) == 0, "SGL must be Qword aligned");
SPDK_STATIC_ASSERT((offsetof(struct nvme_tracker, meta_sgl) & 7) == 0, "SGL must be Qword aligned");
     111             : 
/* PCIe transport extensions for spdk_nvme_transport_poll_group */
struct nvme_pcie_poll_group {
	/* Embedded base transport poll group */
	struct spdk_nvme_transport_poll_group group;
	/* Statistics (e.g. doorbell update counters incremented by the
	 * ring helpers below); see nvme_pcie_qpair::shared_stats.
	 */
	struct spdk_nvme_pcie_stat stats;
};
     116             : 
/*
 * Connection states for a PCIe qpair (presumably held in
 * nvme_pcie_qpair::pcie_state — confirm in the .c file).
 */
enum nvme_pcie_qpair_state {
	/* Waiting for the I/O completion queue to be created */
	NVME_PCIE_QPAIR_WAIT_FOR_CQ = 1,
	/* Waiting for the I/O submission queue to be created */
	NVME_PCIE_QPAIR_WAIT_FOR_SQ,
	/* Qpair is connected and usable */
	NVME_PCIE_QPAIR_READY,
	/* Qpair setup failed */
	NVME_PCIE_QPAIR_FAILED,
};
     123             : 
/* PCIe transport extensions for spdk_nvme_qpair */
struct nvme_pcie_qpair {
	/* Submission queue tail doorbell */
	volatile uint32_t *sq_tdbl;

	/* Completion queue head doorbell */
	volatile uint32_t *cq_hdbl;

	/* Submission queue */
	struct spdk_nvme_cmd *cmd;

	/* Completion queue */
	struct spdk_nvme_cpl *cpl;

	/* Trackers not currently bound to a command */
	TAILQ_HEAD(, nvme_tracker) free_tr;
	/* Trackers for submitted, not-yet-completed commands */
	TAILQ_HEAD(nvme_outstanding_tr_head, nvme_tracker) outstanding_tr;

	/* Array of trackers indexed by command ID. */
	struct nvme_tracker *tr;

	/* Doorbell/completion statistics; may point at the poll group's
	 * shared stats (see shared_stats below).
	 */
	struct spdk_nvme_pcie_stat *stat;

	/* Number of entries in the queue */
	uint16_t num_entries;

	/* One of enum nvme_pcie_qpair_state */
	uint8_t pcie_state;

	uint8_t retry_count;

	/* Cap on completions processed per poll.
	 * NOTE(review): presumably clamped to [NVME_MIN_COMPLETIONS,
	 * NVME_MAX_COMPLETIONS] — confirm in the .c file.
	 */
	uint16_t max_completions_cap;

	/* SQ tail as of the last doorbell ring */
	uint16_t last_sq_tail;
	uint16_t sq_tail;
	uint16_t cq_head;
	uint16_t sq_head;

	struct {
		/* Expected phase bit of the next valid completion entry */
		uint8_t phase                   : 1;
		/* Delay/batch doorbell rings rather than ringing per command */
		uint8_t delay_cmd_submit        : 1;
		/* Shadow doorbells are active; see shadow_doorbell below */
		uint8_t has_shadow_doorbell     : 1;
		uint8_t has_pending_vtophys_failures : 1;
		uint8_t defer_destruction       : 1;
	} flags;

	/*
	 * Base qpair structure.
	 * This is located after the hot data in this structure so that the important parts of
	 * nvme_pcie_qpair are in the same cache line.
	 */
	struct spdk_nvme_qpair qpair;

	struct {
		/* Submission queue shadow tail doorbell */
		volatile uint32_t *sq_tdbl;

		/* Completion queue shadow head doorbell */
		volatile uint32_t *cq_hdbl;

		/* Submission queue event index */
		volatile uint32_t *sq_eventidx;

		/* Completion queue event index */
		volatile uint32_t *cq_eventidx;
	} shadow_doorbell;

	/*
	 * Fields below this point should not be touched on the normal I/O path.
	 */

	/* True when the submission queue lives in the controller memory buffer */
	bool sq_in_cmb;
	/* True when stat points at shared (poll group) statistics rather than
	 * a per-qpair allocation.
	 */
	bool shared_stats;

	/* Bus (physical) addresses of the submission and completion queues */
	uint64_t cmd_bus_addr;
	uint64_t cpl_bus_addr;

	/* NOTE(review): presumably caller-provided queue memory when non-NULL
	 * — confirm against nvme_pcie_qpair_construct in the .c file.
	 */
	struct spdk_nvme_cmd *sq_vaddr;
	struct spdk_nvme_cpl *cq_vaddr;
};
     201             : 
     202             : static inline struct nvme_pcie_qpair *
     203          44 : nvme_pcie_qpair(struct spdk_nvme_qpair *qpair)
     204             : {
     205          44 :         return SPDK_CONTAINEROF(qpair, struct nvme_pcie_qpair, qpair);
     206             : }
     207             : 
     208             : static inline struct nvme_pcie_ctrlr *
     209         170 : nvme_pcie_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
     210             : {
     211         170 :         return SPDK_CONTAINEROF(ctrlr, struct nvme_pcie_ctrlr, ctrlr);
     212             : }
     213             : 
     214             : static inline int
     215           2 : nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
     216             : {
     217           2 :         return (uint16_t)(new_idx - event_idx) <= (uint16_t)(new_idx - old);
     218             : }
     219             : 
/*
 * Publish a new doorbell value to the shadow doorbell location and decide
 * whether a real MMIO doorbell write is still required.
 *
 * Stores 'value' into the shared shadow doorbell, then compares the old and
 * new values against the controller-maintained EventIdx.  Returns true when
 * the caller must also write the MMIO doorbell register, false when the
 * shadow update alone is sufficient.
 */
static inline bool
nvme_pcie_qpair_update_mmio_required(uint16_t value,
				     volatile uint32_t *shadow_db,
				     volatile uint32_t *eventidx)
{
	uint16_t old;

	/* Order prior stores (new queue entries) before the shadow doorbell store. */
	spdk_wmb();

	old = *shadow_db;
	*shadow_db = value;

	/*
	 * Ensure that the doorbell is updated before reading the EventIdx from
	 * memory
	 */
	spdk_mb();

	if (!nvme_pcie_qpair_need_event(*eventidx, value, old)) {
		return false;
	}

	return true;
}
     244             : 
/*
 * Ring the submission queue tail doorbell for the given qpair.
 *
 * If the command just submitted is the first of a fused pair, the doorbell
 * is deliberately not rung so both halves become visible to the controller
 * together.  With shadow doorbells active, the MMIO write is skipped when
 * the EventIdx check shows the controller does not need notification.
 */
static inline void
nvme_pcie_qpair_ring_sq_doorbell(struct spdk_nvme_qpair *qpair)
{
	struct nvme_pcie_qpair	*pqpair = nvme_pcie_qpair(qpair);
	struct nvme_pcie_ctrlr	*pctrlr = nvme_pcie_ctrlr(qpair->ctrlr);
	bool need_mmio = true;

	if (qpair->last_fuse == SPDK_NVME_IO_FLAGS_FUSE_FIRST) {
		/* This is first cmd of two fused commands - don't ring doorbell */
		return;
	}

	if (spdk_unlikely(pqpair->flags.has_shadow_doorbell)) {
		pqpair->stat->sq_shadow_doorbell_updates++;
		need_mmio = nvme_pcie_qpair_update_mmio_required(
				    pqpair->sq_tail,
				    pqpair->shadow_doorbell.sq_tdbl,
				    pqpair->shadow_doorbell.sq_eventidx);
	}

	if (spdk_likely(need_mmio)) {
		/* Order the SQ entry stores before the MMIO doorbell write. */
		spdk_wmb();
		pqpair->stat->sq_mmio_doorbell_updates++;
		/* Mark the controller being accessed for the duration of the
		 * MMIO write; NOTE(review): presumably read by a fault handler
		 * — confirm in the .c file.
		 */
		g_thread_mmio_ctrlr = pctrlr;
		spdk_mmio_write_4(pqpair->sq_tdbl, pqpair->sq_tail);
		g_thread_mmio_ctrlr = NULL;
	}
}
     273             : 
/*
 * Ring the completion queue head doorbell for the given qpair.
 *
 * With shadow doorbells active, the MMIO write is skipped when the EventIdx
 * check shows the controller does not need notification.  Unlike the SQ
 * path, no spdk_wmb() precedes the MMIO write here — there are no queue
 * entry stores the controller must observe first.
 */
static inline void
nvme_pcie_qpair_ring_cq_doorbell(struct spdk_nvme_qpair *qpair)
{
	struct nvme_pcie_qpair	*pqpair = nvme_pcie_qpair(qpair);
	struct nvme_pcie_ctrlr	*pctrlr = nvme_pcie_ctrlr(qpair->ctrlr);
	bool need_mmio = true;

	if (spdk_unlikely(pqpair->flags.has_shadow_doorbell)) {
		pqpair->stat->cq_shadow_doorbell_updates++;
		need_mmio = nvme_pcie_qpair_update_mmio_required(
				    pqpair->cq_head,
				    pqpair->shadow_doorbell.cq_hdbl,
				    pqpair->shadow_doorbell.cq_eventidx);
	}

	if (spdk_likely(need_mmio)) {
		pqpair->stat->cq_mmio_doorbell_updates++;
		/* See the note on g_thread_mmio_ctrlr at its declaration. */
		g_thread_mmio_ctrlr = pctrlr;
		spdk_mmio_write_4(pqpair->cq_hdbl, pqpair->cq_head);
		g_thread_mmio_ctrlr = NULL;
	}
}
     296             : 
/* Qpair construction and reset */
int nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair);
int nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
			      const struct spdk_nvme_io_qpair_opts *opts);
int nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t num_entries);

/* Pending admin request bookkeeping */
void nvme_pcie_qpair_insert_pending_admin_request(struct spdk_nvme_qpair *qpair,
		struct nvme_request *req, struct spdk_nvme_cpl *cpl);
void nvme_pcie_qpair_complete_pending_admin_request(struct spdk_nvme_qpair *qpair);

/* Admin commands to create/delete I/O queues on the controller */
int nvme_pcie_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
				     struct spdk_nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn,
				     void *cb_arg);
int nvme_pcie_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
				     struct spdk_nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
int nvme_pcie_ctrlr_cmd_delete_io_cq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg);
int nvme_pcie_ctrlr_cmd_delete_io_sq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg);

/* Qpair connect/disconnect */
int nvme_pcie_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
void nvme_pcie_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);

/* Tracker submission, completion, and abort paths */
void nvme_pcie_qpair_abort_trackers(struct spdk_nvme_qpair *qpair, uint32_t dnr);
void nvme_pcie_qpair_manual_complete_tracker(struct spdk_nvme_qpair *qpair,
		struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
		bool print_on_error);
void nvme_pcie_qpair_complete_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr,
				      struct spdk_nvme_cpl *cpl, bool print_on_error);
void nvme_pcie_qpair_submit_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr);
void nvme_pcie_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
void nvme_pcie_admin_qpair_destroy(struct spdk_nvme_qpair *qpair);
void nvme_pcie_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);

/* I/O path and qpair lifecycle */
int32_t nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair,
		uint32_t max_completions);
int nvme_pcie_qpair_destroy(struct spdk_nvme_qpair *qpair);
struct spdk_nvme_qpair *nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
		const struct spdk_nvme_io_qpair_opts *opts);
int nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
int nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);

/* Poll group interface */
int nvme_pcie_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
				   struct spdk_nvme_transport_poll_group_stat **_stats);
void nvme_pcie_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
				     struct spdk_nvme_transport_poll_group_stat *stats);

struct spdk_nvme_transport_poll_group *nvme_pcie_poll_group_create(void);
int nvme_pcie_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
int nvme_pcie_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
int nvme_pcie_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			     struct spdk_nvme_qpair *qpair);
int nvme_pcie_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				struct spdk_nvme_qpair *qpair);
int64_t nvme_pcie_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
int nvme_pcie_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);
     348             : 
     349             : #endif

Generated by: LCOV version 1.15