/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation. All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#ifndef __NVME_PCIE_INTERNAL_H__
#define __NVME_PCIE_INTERNAL_H__

/*
 * Number of completion queue entries to process before ringing the
 *  completion queue doorbell.
 */
#define NVME_MIN_COMPLETIONS    (1)
#define NVME_MAX_COMPLETIONS    (128)

/*
 * NVME_MAX_SGL_DESCRIPTORS defines the maximum number of descriptors in one SGL
 *  segment.
 */
#define NVME_MAX_SGL_DESCRIPTORS        (250)

#define NVME_MAX_PRP_LIST_ENTRIES       (503)

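/*
 * Worked example (illustrative, not part of the driver): with a 4 KiB memory
 * page, one full PRP list of NVME_MAX_PRP_LIST_ENTRIES entries maps
 * 503 * 4096 = 2,060,288 bytes (about 1.96 MiB), which roughly bounds the
 * per-request transfer size when PRPs are used.
 */
#if 0 /* example only */
SPDK_STATIC_ASSERT(NVME_MAX_PRP_LIST_ENTRIES * 4096 == 2060288,
                   "a full PRP list maps about 1.96 MiB");
#endif
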
/* Minimum admin queue size */
#define NVME_PCIE_MIN_ADMIN_QUEUE_SIZE  (256)

/* PCIe transport extensions for spdk_nvme_ctrlr */
struct nvme_pcie_ctrlr {
        struct spdk_nvme_ctrlr ctrlr;

        /** NVMe MMIO register space */
        volatile struct spdk_nvme_registers *regs;

        /** NVMe MMIO register size */
        uint64_t regs_size;

        struct {
                /* BAR mapping address that contains the controller memory buffer */
                void *bar_va;

                /* BAR physical address that contains the controller memory buffer */
                uint64_t bar_pa;

                /* Controller memory buffer size in bytes */
                uint64_t size;

                /* Current offset of controller memory buffer, relative to start of BAR virt addr */
                uint64_t current_offset;

                void *mem_register_addr;
                size_t mem_register_size;
        } cmb;

        struct {
                /* BAR mapping address that contains the persistent memory region */
                void *bar_va;

                /* BAR physical address that contains the persistent memory region */
                uint64_t bar_pa;

                /* Persistent memory region size in bytes */
                uint64_t size;

                void *mem_register_addr;
                size_t mem_register_size;
        } pmr;

        /** Stride in uint32_t units between doorbell registers (1 = 4 bytes, 2 = 8 bytes, ...) */
        uint32_t doorbell_stride_u32;

        /* Opaque handle to the associated PCI device. */
        struct spdk_pci_device *devhandle;

        /* Flag indicating that the MMIO register space has been remapped */
        bool is_remapped;

        volatile uint32_t *doorbell_base;
};

extern __thread struct nvme_pcie_ctrlr *g_thread_mmio_ctrlr;

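/*
 * Illustrative sketch (example only): per the NVMe specification, queue pair
 * `qid` has its submission queue tail doorbell at doorbell index 2 * qid and
 * its completion queue head doorbell at index 2 * qid + 1, each scaled by
 * the doorbell stride.
 */
#if 0 /* example only */
static inline volatile uint32_t *
example_sq_tdbl(struct nvme_pcie_ctrlr *pctrlr, uint16_t qid)
{
        return pctrlr->doorbell_base + (2 * qid + 0) * pctrlr->doorbell_stride_u32;
}

static inline volatile uint32_t *
example_cq_hdbl(struct nvme_pcie_ctrlr *pctrlr, uint16_t qid)
{
        return pctrlr->doorbell_base + (2 * qid + 1) * pctrlr->doorbell_stride_u32;
}
#endif
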
struct nvme_tracker {
        TAILQ_ENTRY(nvme_tracker)       tq_list;

        struct nvme_request             *req;
        uint16_t                        cid;

        uint16_t                        bad_vtophys : 1;
        uint16_t                        rsvd0 : 15;
        uint32_t                        rsvd1;

        spdk_nvme_cmd_cb                cb_fn;
        void                            *cb_arg;

        uint64_t                        prp_sgl_bus_addr;

        /* Don't move, metadata SGL is always contiguous with Data Block SGL */
        struct spdk_nvme_sgl_descriptor         meta_sgl;
        union {
                uint64_t                        prp[NVME_MAX_PRP_LIST_ENTRIES];
                struct spdk_nvme_sgl_descriptor sgl[NVME_MAX_SGL_DESCRIPTORS];
        } u;
};
/*
 * struct nvme_tracker must be exactly 4K so that the prp[] array does not cross a page boundary
 * and so that there is no padding required to meet alignment requirements.
 */
SPDK_STATIC_ASSERT(sizeof(struct nvme_tracker) == 4096, "nvme_tracker is not 4K");
SPDK_STATIC_ASSERT((offsetof(struct nvme_tracker, u.sgl) & 7) == 0, "SGL must be Qword aligned");
SPDK_STATIC_ASSERT((offsetof(struct nvme_tracker, meta_sgl) & 7) == 0, "SGL must be Qword aligned");

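/*
 * Worked size check (illustrative, assuming an LP64 ABI): the fields ahead of
 * the union occupy 16 (tq_list) + 8 (req) + 2 + 2 + 4 (cid, bitfields, rsvd1)
 * + 8 + 8 (cb_fn, cb_arg) + 8 (prp_sgl_bus_addr) + 16 (meta_sgl) = 72 bytes,
 * and the union is max(503 * 8, 250 * 16) = 4024 bytes, giving
 * 72 + 4024 = 4096 with no padding.
 */
#if 0 /* example only */
SPDK_STATIC_ASSERT(offsetof(struct nvme_tracker, u) == 72, "72-byte tracker header");
SPDK_STATIC_ASSERT(sizeof(((struct nvme_tracker *)0)->u) == 4024, "4024-byte PRP/SGL union");
#endif
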
struct nvme_pcie_poll_group {
        struct spdk_nvme_transport_poll_group group;
        struct spdk_nvme_pcie_stat stats;
};

enum nvme_pcie_qpair_state {
        NVME_PCIE_QPAIR_WAIT_FOR_CQ = 1,
        NVME_PCIE_QPAIR_WAIT_FOR_SQ,
        NVME_PCIE_QPAIR_READY,
        NVME_PCIE_QPAIR_FAILED,
};

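/*
 * Note (inferred from the state names above; illustrative): connecting an I/O
 * queue pair first waits for the Create I/O CQ admin command to complete
 * (WAIT_FOR_CQ), then for Create I/O SQ (WAIT_FOR_SQ), before the qpair
 * becomes READY; FAILED marks an error anywhere in that sequence.
 */
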
/* PCIe transport extensions for spdk_nvme_qpair */
struct nvme_pcie_qpair {
        /* Submission queue tail doorbell */
        volatile uint32_t *sq_tdbl;

        /* Completion queue head doorbell */
        volatile uint32_t *cq_hdbl;

        /* Submission queue */
        struct spdk_nvme_cmd *cmd;

        /* Completion queue */
        struct spdk_nvme_cpl *cpl;

        TAILQ_HEAD(, nvme_tracker) free_tr;
        TAILQ_HEAD(nvme_outstanding_tr_head, nvme_tracker) outstanding_tr;

        /* Array of trackers indexed by command ID. */
        struct nvme_tracker *tr;

        struct spdk_nvme_pcie_stat *stat;

        uint16_t num_entries;

        uint8_t pcie_state;

        uint8_t retry_count;

        uint16_t max_completions_cap;

        uint16_t last_sq_tail;
        uint16_t sq_tail;
        uint16_t cq_head;
        uint16_t sq_head;

        struct {
                uint8_t phase                   : 1;
                uint8_t delay_cmd_submit        : 1;
                uint8_t has_shadow_doorbell     : 1;
                uint8_t has_pending_vtophys_failures : 1;
                uint8_t defer_destruction       : 1;

                /* Disable merging of physically contiguous SGL entries */
                uint8_t disable_pcie_sgl_merge  : 1;
        } flags;

        /*
         * Base qpair structure.
         * This is located after the hot data in this structure so that the important parts of
         * nvme_pcie_qpair are in the same cache line.
         */
        struct spdk_nvme_qpair qpair;

        struct {
                /* Submission queue shadow tail doorbell */
                volatile uint32_t *sq_tdbl;

                /* Completion queue shadow head doorbell */
                volatile uint32_t *cq_hdbl;

                /* Submission queue event index */
                volatile uint32_t *sq_eventidx;

                /* Completion queue event index */
                volatile uint32_t *cq_eventidx;
        } shadow_doorbell;

        /*
         * Fields below this point should not be touched on the normal I/O path.
         */

        bool sq_in_cmb;
        bool shared_stats;

        uint64_t cmd_bus_addr;
        uint64_t cpl_bus_addr;

        struct spdk_nvme_cmd *sq_vaddr;
        struct spdk_nvme_cpl *cq_vaddr;
};

static inline struct nvme_pcie_qpair *
nvme_pcie_qpair(struct spdk_nvme_qpair *qpair)
{
        return SPDK_CONTAINEROF(qpair, struct nvme_pcie_qpair, qpair);
}

static inline struct nvme_pcie_ctrlr *
nvme_pcie_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
        return SPDK_CONTAINEROF(ctrlr, struct nvme_pcie_ctrlr, ctrlr);
}

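/*
 * The two accessors above rely on the container-of pattern: given a pointer
 * to the embedded generic structure, subtracting the member's offset recovers
 * the transport-specific wrapper. A minimal expansion of the idiom
 * (illustrative; SPDK provides the real SPDK_CONTAINEROF macro):
 */
#if 0 /* example only */
#define EXAMPLE_CONTAINEROF(ptr, type, member) \
        ((type *)((uintptr_t)(ptr) - offsetof(type, member)))
#endif
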
static inline int
nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
        return (uint16_t)(new_idx - event_idx) <= (uint16_t)(new_idx - old);
}
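
/*
 * The unsigned subtractions above make the comparison wraparound-safe: an
 * MMIO doorbell write is needed exactly when the doorbell moved from `old`
 * to `new_idx` past `event_idx`. Worked examples (illustrative; assumes
 * <assert.h>):
 */
#if 0 /* example only */
static void
example_need_event(void)
{
        assert(nvme_pcie_qpair_need_event(3, 5, 2));           /* 3 lies in (2, 5] */
        assert(nvme_pcie_qpair_need_event(0xFFFF, 1, 0xFFFE)); /* same, across wraparound */
        assert(!nvme_pcie_qpair_need_event(10, 5, 2));         /* 10 not yet reached */
}
#endif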

static inline bool
nvme_pcie_qpair_update_mmio_required(uint16_t value,
                                     volatile uint32_t *shadow_db,
                                     volatile uint32_t *eventidx)
{
        uint16_t old;

        spdk_wmb();

        old = *shadow_db;
        *shadow_db = value;

        /*
         * Ensure that the doorbell is updated before reading the EventIdx from
         * memory
         */
        spdk_mb();

        if (!nvme_pcie_qpair_need_event(*eventidx, value, old)) {
                return false;
        }

        return true;
}
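
/*
 * Background (illustrative): the shadow doorbell mechanism implements the
 * NVMe Doorbell Buffer Config feature, aimed at emulated or paravirtualized
 * controllers. The host mirrors every doorbell update into a shadow buffer
 * and performs the expensive MMIO write only when the new value passes the
 * controller-published event index.
 */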

static inline void
nvme_pcie_qpair_ring_sq_doorbell(struct spdk_nvme_qpair *qpair)
{
        struct nvme_pcie_qpair  *pqpair = nvme_pcie_qpair(qpair);
        struct nvme_pcie_ctrlr  *pctrlr = nvme_pcie_ctrlr(qpair->ctrlr);
        bool need_mmio = true;

        if (qpair->last_fuse == SPDK_NVME_IO_FLAGS_FUSE_FIRST) {
                /* This is the first of two fused commands - don't ring the doorbell yet */
                return;
        }

        if (spdk_unlikely(pqpair->flags.has_shadow_doorbell)) {
                pqpair->stat->sq_shadow_doorbell_updates++;
                need_mmio = nvme_pcie_qpair_update_mmio_required(
                                    pqpair->sq_tail,
                                    pqpair->shadow_doorbell.sq_tdbl,
                                    pqpair->shadow_doorbell.sq_eventidx);
        }

        if (spdk_likely(need_mmio)) {
                spdk_wmb();
                pqpair->stat->sq_mmio_doorbell_updates++;
                g_thread_mmio_ctrlr = pctrlr;
                spdk_mmio_write_4(pqpair->sq_tdbl, pqpair->sq_tail);
                g_thread_mmio_ctrlr = NULL;
        }
}

static inline void
nvme_pcie_qpair_ring_cq_doorbell(struct spdk_nvme_qpair *qpair)
{
        struct nvme_pcie_qpair  *pqpair = nvme_pcie_qpair(qpair);
        struct nvme_pcie_ctrlr  *pctrlr = nvme_pcie_ctrlr(qpair->ctrlr);
        bool need_mmio = true;

        if (spdk_unlikely(pqpair->flags.has_shadow_doorbell)) {
                pqpair->stat->cq_shadow_doorbell_updates++;
                need_mmio = nvme_pcie_qpair_update_mmio_required(
                                    pqpair->cq_head,
                                    pqpair->shadow_doorbell.cq_hdbl,
                                    pqpair->shadow_doorbell.cq_eventidx);
        }

        if (spdk_likely(need_mmio)) {
                pqpair->stat->cq_mmio_doorbell_updates++;
                g_thread_mmio_ctrlr = pctrlr;
                spdk_mmio_write_4(pqpair->cq_hdbl, pqpair->cq_head);
                g_thread_mmio_ctrlr = NULL;
        }
}
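
/*
 * Simplified sketch of where the CQ doorbell ring fits (illustrative; the
 * real loop lives in nvme_pcie_qpair_process_completions): completions are
 * consumed while their phase bit matches the qpair's expected phase, and
 * the head doorbell is published once afterwards.
 */
#if 0 /* example only */
struct spdk_nvme_cpl *cpl = &pqpair->cpl[pqpair->cq_head];

while (cpl->status.p == pqpair->flags.phase) {
        /* ... look up and complete the tracker for cpl->cid ... */
        if (++pqpair->cq_head == pqpair->num_entries) {
                pqpair->cq_head = 0;
                pqpair->flags.phase = !pqpair->flags.phase;
        }
        cpl = &pqpair->cpl[pqpair->cq_head];
}
nvme_pcie_qpair_ring_cq_doorbell(qpair);
#endif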

int nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair);
int nvme_pcie_qpair_get_fd(struct spdk_nvme_qpair *qpair, struct spdk_event_handler_opts *opts);
int nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
                              const struct spdk_nvme_io_qpair_opts *opts);
int nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t num_entries);
void nvme_pcie_qpair_insert_pending_admin_request(struct spdk_nvme_qpair *qpair,
                struct nvme_request *req, struct spdk_nvme_cpl *cpl);
void nvme_pcie_qpair_complete_pending_admin_request(struct spdk_nvme_qpair *qpair);
int nvme_pcie_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
                                     struct spdk_nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn,
                                     void *cb_arg);
int nvme_pcie_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
                                     struct spdk_nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
int nvme_pcie_ctrlr_cmd_delete_io_cq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
                                     spdk_nvme_cmd_cb cb_fn, void *cb_arg);
int nvme_pcie_ctrlr_cmd_delete_io_sq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
                                     spdk_nvme_cmd_cb cb_fn, void *cb_arg);
int nvme_pcie_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
void nvme_pcie_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
void nvme_pcie_qpair_abort_trackers(struct spdk_nvme_qpair *qpair, uint32_t dnr);
void nvme_pcie_qpair_manual_complete_tracker(struct spdk_nvme_qpair *qpair,
                struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
                bool print_on_error);
void nvme_pcie_qpair_complete_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr,
                                      struct spdk_nvme_cpl *cpl, bool print_on_error);
void nvme_pcie_qpair_submit_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr);
void nvme_pcie_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
void nvme_pcie_admin_qpair_destroy(struct spdk_nvme_qpair *qpair);
void nvme_pcie_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
int32_t nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair,
                uint32_t max_completions);
int nvme_pcie_qpair_destroy(struct spdk_nvme_qpair *qpair);
struct spdk_nvme_qpair *nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
                const struct spdk_nvme_io_qpair_opts *opts);
int nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
int nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
int nvme_pcie_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
                                   struct spdk_nvme_transport_poll_group_stat **_stats);
void nvme_pcie_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
                                     struct spdk_nvme_transport_poll_group_stat *stats);

struct spdk_nvme_transport_poll_group *nvme_pcie_poll_group_create(void);
int nvme_pcie_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
int nvme_pcie_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
int nvme_pcie_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
                             struct spdk_nvme_qpair *qpair);
int nvme_pcie_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
                                struct spdk_nvme_qpair *qpair);
int64_t nvme_pcie_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
                uint32_t completions_per_qpair,
                spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
void nvme_pcie_poll_group_check_disconnected_qpairs(
        struct spdk_nvme_transport_poll_group *tgroup,
        spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
int nvme_pcie_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);

#endif
