LCOV - code coverage report
Current view: top level - lib/nvme - nvme_ctrlr.c
Test: ut_cov_unit.info
Date: 2024-11-05 10:06:02

                  Hit    Total  Coverage
Lines:           1599     2727    58.6 %
Functions:        142      212    67.0 %

          Line data    Source code
       1             : /*   SPDX-License-Identifier: BSD-3-Clause
       2             :  *   Copyright (C) 2015 Intel Corporation. All rights reserved.
       3             :  *   Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
       4             :  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
       5             :  */
       6             : 
       7             : #include "spdk/stdinc.h"
       8             : 
       9             : #include "nvme_internal.h"
      10             : #include "nvme_io_msg.h"
      11             : 
      12             : #include "spdk/env.h"
      13             : #include "spdk/string.h"
      14             : #include "spdk/endian.h"
      15             : 
      16             : struct nvme_active_ns_ctx;
      17             : 
      18             : static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
      19             :                 struct nvme_async_event_request *aer);
      20             : static void nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx);
      21             : static int nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns);
      22             : static int nvme_ctrlr_identify_ns_iocs_specific_async(struct spdk_nvme_ns *ns);
      23             : static int nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns);
      24             : static void nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr);
      25             : static void nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
      26             :                                  uint64_t timeout_in_ms);
      27             : 
      28             : static int
      29      477891 : nvme_ns_cmp(struct spdk_nvme_ns *ns1, struct spdk_nvme_ns *ns2)
      30             : {
      31      477891 :         if (ns1->id < ns2->id) {
      32      164867 :                 return -1;
      33      313024 :         } else if (ns1->id > ns2->id) {
      34      276062 :                 return 1;
      35             :         } else {
      36       36962 :                 return 0;
      37             :         }
      38             : }
      39             : 
      40      607070 : RB_GENERATE_STATIC(nvme_ns_tree, spdk_nvme_ns, node, nvme_ns_cmp);
      41             : 
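Usage note: the comparator and RB_GENERATE_STATIC above give this file a
red-black tree of namespaces keyed by namespace ID, so a per-NSID lookup is
O(log n) rather than a linear scan. A minimal lookup sketch (an assumption:
the tree head is taken to live at ctrlr->ns, as used elsewhere in this file):

        struct spdk_nvme_ns key, *ns;

        key.id = nsid;                                /* the compare key is only the ID */
        ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &key); /* NULL if no node has this ID */
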
      42             : #define nvme_ctrlr_get_reg_async(ctrlr, reg, sz, cb_fn, cb_arg) \
      43             :         nvme_transport_ctrlr_get_reg_ ## sz ## _async(ctrlr, \
      44             :                 offsetof(struct spdk_nvme_registers, reg), cb_fn, cb_arg)
      45             : 
      46             : #define nvme_ctrlr_set_reg_async(ctrlr, reg, sz, val, cb_fn, cb_arg) \
      47             :         nvme_transport_ctrlr_set_reg_ ## sz ## _async(ctrlr, \
      48             :                 offsetof(struct spdk_nvme_registers, reg), val, cb_fn, cb_arg)
      49             : 
      50             : #define nvme_ctrlr_get_cc_async(ctrlr, cb_fn, cb_arg) \
      51             :         nvme_ctrlr_get_reg_async(ctrlr, cc, 4, cb_fn, cb_arg)
      52             : 
      53             : #define nvme_ctrlr_get_csts_async(ctrlr, cb_fn, cb_arg) \
      54             :         nvme_ctrlr_get_reg_async(ctrlr, csts, 4, cb_fn, cb_arg)
      55             : 
      56             : #define nvme_ctrlr_get_cap_async(ctrlr, cb_fn, cb_arg) \
      57             :         nvme_ctrlr_get_reg_async(ctrlr, cap, 8, cb_fn, cb_arg)
      58             : 
      59             : #define nvme_ctrlr_get_vs_async(ctrlr, cb_fn, cb_arg) \
      60             :         nvme_ctrlr_get_reg_async(ctrlr, vs, 4, cb_fn, cb_arg)
      61             : 
      62             : #define nvme_ctrlr_set_cc_async(ctrlr, value, cb_fn, cb_arg) \
      63             :         nvme_ctrlr_set_reg_async(ctrlr, cc, 4, value, cb_fn, cb_arg)
      64             : 
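For reference, the token-pasting in these macros means that a call such as
nvme_ctrlr_get_cc_async(ctrlr, cb_fn, cb_arg) expands to:

        nvme_transport_ctrlr_get_reg_4_async(ctrlr,
                offsetof(struct spdk_nvme_registers, cc), cb_fn, cb_arg);

and nvme_ctrlr_set_cc_async() likewise resolves to the 4-byte set variant at
the same register offset.
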
      65             : static int
      66           0 : nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
      67             : {
      68           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
      69             :                                               &cc->raw);
      70             : }
      71             : 
      72             : static int
      73           0 : nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
      74             : {
      75           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
      76             :                                               &csts->raw);
      77             : }
      78             : 
      79             : int
      80           0 : nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
      81             : {
      82           0 :         return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
      83             :                                               &cap->raw);
      84             : }
      85             : 
      86             : int
      87           1 : nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
      88             : {
      89           1 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
      90             :                                               &vs->raw);
      91             : }
      92             : 
      93             : int
      94           0 : nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz)
      95             : {
      96           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
      97             :                                               &cmbsz->raw);
      98             : }
      99             : 
     100             : int
     101           0 : nvme_ctrlr_get_pmrcap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_pmrcap_register *pmrcap)
     102             : {
     103           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
     104             :                                               &pmrcap->raw);
     105             : }
     106             : 
     107             : int
     108           0 : nvme_ctrlr_get_bpinfo(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bpinfo_register *bpinfo)
     109             : {
     110           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, bpinfo.raw),
     111             :                                               &bpinfo->raw);
     112             : }
     113             : 
     114             : int
     115           0 : nvme_ctrlr_set_bprsel(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bprsel_register *bprsel)
     116             : {
     117           0 :         return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, bprsel.raw),
     118             :                                               bprsel->raw);
     119             : }
     120             : 
     121             : int
     122           0 : nvme_ctrlr_set_bpmbl(struct spdk_nvme_ctrlr *ctrlr, uint64_t bpmbl_value)
     123             : {
     124           0 :         return nvme_transport_ctrlr_set_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, bpmbl),
     125             :                                               bpmbl_value);
     126             : }
     127             : 
     128             : static int
     129           0 : nvme_ctrlr_set_nssr(struct spdk_nvme_ctrlr *ctrlr, uint32_t nssr_value)
     130             : {
     131           0 :         return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, nssr),
     132             :                                               nssr_value);
     133             : }
     134             : 
     135             : bool
     136          33 : nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr)
     137             : {
     138          35 :         return ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS &&
     139           2 :                ctrlr->opts.command_set == SPDK_NVME_CC_CSS_IOCS;
     140             : }
     141             : 
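nvme_ctrlr_multi_iocs_enabled() is true only when the controller advertises
multiple I/O command sets in CAP.CSS and the host explicitly asked for them.
A minimal opt-in sketch (hypothetical application code; the connect call is
shown only as an example):

        struct spdk_nvme_ctrlr_opts opts;

        spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
        opts.command_set = SPDK_NVME_CC_CSS_IOCS;  /* request the IOCS combination */
        /* ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts)); */
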
      142             : /* When a field in spdk_nvme_ctrlr_opts is changed and you change this function, please
      143             :  * also update the nvme_ctrlr_opts_init function in nvme_ctrlr.c
     144             :  */
     145             : void
     146           2 : spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
     147             : {
     148           2 :         assert(opts);
     149             : 
     150           2 :         opts->opts_size = opts_size;
     151             : 
     152             : #define FIELD_OK(field) \
     153             :         offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size
     154             : 
     155             : #define SET_FIELD(field, value) \
     156             :         if (offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size) { \
     157             :                 opts->field = value; \
     158             :         } \
     159             : 
     160           2 :         SET_FIELD(num_io_queues, DEFAULT_MAX_IO_QUEUES);
     161           2 :         SET_FIELD(use_cmb_sqs, false);
     162           2 :         SET_FIELD(no_shn_notification, false);
     163           2 :         SET_FIELD(enable_interrupts, false);
     164           2 :         SET_FIELD(arb_mechanism, SPDK_NVME_CC_AMS_RR);
     165           2 :         SET_FIELD(arbitration_burst, 0);
     166           2 :         SET_FIELD(low_priority_weight, 0);
     167           2 :         SET_FIELD(medium_priority_weight, 0);
     168           2 :         SET_FIELD(high_priority_weight, 0);
     169           2 :         SET_FIELD(keep_alive_timeout_ms, MIN_KEEP_ALIVE_TIMEOUT_IN_MS);
     170           2 :         SET_FIELD(transport_retry_count, SPDK_NVME_DEFAULT_RETRY_COUNT);
     171           2 :         SET_FIELD(io_queue_size, DEFAULT_IO_QUEUE_SIZE);
     172             : 
     173           2 :         if (nvme_driver_init() == 0) {
     174           2 :                 if (FIELD_OK(hostnqn)) {
     175           1 :                         nvme_get_default_hostnqn(opts->hostnqn, sizeof(opts->hostnqn));
     176             :                 }
     177             : 
     178           2 :                 if (FIELD_OK(extended_host_id)) {
     179           1 :                         memcpy(opts->extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
     180             :                                sizeof(opts->extended_host_id));
     181             :                 }
     182             : 
     183             :         }
     184             : 
     185           2 :         SET_FIELD(io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
     186             : 
     187           2 :         if (FIELD_OK(src_addr)) {
     188           1 :                 memset(opts->src_addr, 0, sizeof(opts->src_addr));
     189             :         }
     190             : 
     191           2 :         if (FIELD_OK(src_svcid)) {
     192           1 :                 memset(opts->src_svcid, 0, sizeof(opts->src_svcid));
     193             :         }
     194             : 
     195           2 :         if (FIELD_OK(host_id)) {
     196           1 :                 memset(opts->host_id, 0, sizeof(opts->host_id));
     197             :         }
     198             : 
     199           2 :         SET_FIELD(command_set, CHAR_BIT);
     200           2 :         SET_FIELD(admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
     201           2 :         SET_FIELD(header_digest, false);
     202           2 :         SET_FIELD(data_digest, false);
     203           2 :         SET_FIELD(disable_error_logging, false);
     204           2 :         SET_FIELD(transport_ack_timeout, SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT);
     205           2 :         SET_FIELD(admin_queue_size, DEFAULT_ADMIN_QUEUE_SIZE);
     206           2 :         SET_FIELD(fabrics_connect_timeout_us, NVME_FABRIC_CONNECT_COMMAND_TIMEOUT);
     207           2 :         SET_FIELD(disable_read_ana_log_page, false);
     208           2 :         SET_FIELD(disable_read_changed_ns_list_log_page, false);
     209           2 :         SET_FIELD(tls_psk, NULL);
     210           2 :         SET_FIELD(dhchap_key, NULL);
     211           2 :         SET_FIELD(dhchap_ctrlr_key, NULL);
     212           2 :         SET_FIELD(dhchap_digests,
     213             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA256) |
     214             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA384) |
     215             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA512));
     216           2 :         SET_FIELD(dhchap_dhgroups,
     217             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_NULL) |
     218             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_2048) |
     219             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_3072) |
     220             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_4096) |
     221             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_6144) |
     222             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_8192));
     223             : #undef FIELD_OK
     224             : #undef SET_FIELD
     225           2 : }
     226             : 
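The FIELD_OK/SET_FIELD guards above are what make spdk_nvme_ctrlr_opts
extensible without breaking ABI: every store is bounds-checked against the
caller-supplied opts_size, so a binary built against an older, shorter struct
never has the newer fields written. A sketch of the guard's effect:

        /* A caller compiled against an older SPDK passes its smaller sizeof. */
        spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, opts_size);
        /* Inside, a field is defaulted only if
         *     offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field)
         *         <= opts_size
         * so fields added after the caller's struct version stay untouched. */
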
     227             : const struct spdk_nvme_ctrlr_opts *
     228           0 : spdk_nvme_ctrlr_get_opts(struct spdk_nvme_ctrlr *ctrlr)
     229             : {
     230           0 :         return &ctrlr->opts;
     231             : }
     232             : 
     233             : /**
     234             :  * This function will be called when the process allocates the IO qpair.
     235             :  * Note: the ctrlr_lock must be held when calling this function.
     236             :  */
     237             : static void
     238          15 : nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
     239             : {
     240             :         struct spdk_nvme_ctrlr_process  *active_proc;
     241          15 :         struct spdk_nvme_ctrlr          *ctrlr = qpair->ctrlr;
     242             : 
     243          15 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
     244          15 :         if (active_proc) {
     245           0 :                 TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, per_process_tailq);
     246           0 :                 qpair->active_proc = active_proc;
     247             :         }
     248          15 : }
     249             : 
     250             : /**
     251             :  * This function will be called when the process frees the IO qpair.
     252             :  * Note: the ctrlr_lock must be held when calling this function.
     253             :  */
     254             : static void
     255          15 : nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
     256             : {
     257             :         struct spdk_nvme_ctrlr_process  *active_proc;
     258          15 :         struct spdk_nvme_ctrlr          *ctrlr = qpair->ctrlr;
     259             :         struct spdk_nvme_qpair          *active_qpair, *tmp_qpair;
     260             : 
     261          15 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
     262          15 :         if (!active_proc) {
     263          15 :                 return;
     264             :         }
     265             : 
     266           0 :         TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs,
     267             :                            per_process_tailq, tmp_qpair) {
     268           0 :                 if (active_qpair == qpair) {
     269           0 :                         TAILQ_REMOVE(&active_proc->allocated_io_qpairs,
     270             :                                      active_qpair, per_process_tailq);
     271             : 
     272           0 :                         break;
     273             :                 }
     274             :         }
     275             : }
     276             : 
     277             : void
     278          27 : spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
     279             :                 struct spdk_nvme_io_qpair_opts *opts,
     280             :                 size_t opts_size)
     281             : {
     282          27 :         assert(ctrlr);
     283             : 
     284          27 :         assert(opts);
     285             : 
     286          27 :         memset(opts, 0, opts_size);
     287          27 :         opts->opts_size = opts_size;
     288             : 
     289             : #define FIELD_OK(field) \
     290             :         offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size
     291             : 
     292             : #define SET_FIELD(field, value) \
     293             :         if (FIELD_OK(field)) { \
     294             :                 opts->field = value; \
     295             :         } \
     296             : 
     297          27 :         SET_FIELD(qprio, SPDK_NVME_QPRIO_URGENT);
     298          27 :         SET_FIELD(io_queue_size, ctrlr->opts.io_queue_size);
     299          27 :         SET_FIELD(io_queue_requests, ctrlr->opts.io_queue_requests);
     300          27 :         SET_FIELD(delay_cmd_submit, false);
     301          27 :         SET_FIELD(sq.vaddr, NULL);
     302          27 :         SET_FIELD(sq.paddr, 0);
     303          27 :         SET_FIELD(sq.buffer_size, 0);
     304          27 :         SET_FIELD(cq.vaddr, NULL);
     305          27 :         SET_FIELD(cq.paddr, 0);
     306          27 :         SET_FIELD(cq.buffer_size, 0);
     307          27 :         SET_FIELD(create_only, false);
     308          27 :         SET_FIELD(async_mode, false);
     309          27 :         SET_FIELD(disable_pcie_sgl_merge, false);
     310             : 
     311             : #undef FIELD_OK
     312             : #undef SET_FIELD
     313          27 : }
     314             : 
     315             : static void
     316          18 : nvme_ctrlr_io_qpair_opts_copy(struct spdk_nvme_io_qpair_opts *dst,
     317             :                               const struct spdk_nvme_io_qpair_opts *src, size_t opts_size_src)
     318             : {
     319          18 :         if (!opts_size_src) {
      320           0 :                 SPDK_ERRLOG("opts_size_src should not be zero\n");
     321           0 :                 assert(false);
     322             :         }
     323             : 
     324             : #define FIELD_OK(field) \
     325             :         offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(src->field) <= opts_size_src
     326             : 
     327             : #define SET_FIELD(field) \
     328             :         if (FIELD_OK(field)) { \
     329             :                 dst->field = src->field; \
     330             :         } \
     331             : 
     332          18 :         SET_FIELD(qprio);
     333          18 :         SET_FIELD(io_queue_size);
     334          18 :         SET_FIELD(io_queue_requests);
     335          18 :         SET_FIELD(delay_cmd_submit);
     336          18 :         SET_FIELD(sq.vaddr);
     337          18 :         SET_FIELD(sq.paddr);
     338          18 :         SET_FIELD(sq.buffer_size);
     339          18 :         SET_FIELD(cq.vaddr);
     340          18 :         SET_FIELD(cq.paddr);
     341          18 :         SET_FIELD(cq.buffer_size);
     342          18 :         SET_FIELD(create_only);
     343          18 :         SET_FIELD(async_mode);
     344          18 :         SET_FIELD(disable_pcie_sgl_merge);
     345             : 
     346          18 :         dst->opts_size = opts_size_src;
     347             : 
      348             :         /* Do not remove this statement. If you add a new field, update the assert
      349             :          * statement below and add a corresponding SET_FIELD statement above. */
     350             :         SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_io_qpair_opts) == 80, "Incorrect size");
     351             : 
     352             : #undef FIELD_OK
     353             : #undef SET_FIELD
     354          18 : }
     355             : 
     356             : static struct spdk_nvme_qpair *
     357          22 : nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
     358             :                            const struct spdk_nvme_io_qpair_opts *opts)
     359             : {
     360             :         int32_t                                 qid;
     361             :         struct spdk_nvme_qpair                  *qpair;
     362             :         union spdk_nvme_cc_register             cc;
     363             : 
     364          22 :         if (!ctrlr) {
     365           0 :                 return NULL;
     366             :         }
     367             : 
     368          22 :         nvme_ctrlr_lock(ctrlr);
     369          22 :         cc.raw = ctrlr->process_init_cc.raw;
     370             : 
     371          22 :         if (opts->qprio & ~SPDK_NVME_CREATE_IO_SQ_QPRIO_MASK) {
     372           2 :                 nvme_ctrlr_unlock(ctrlr);
     373           2 :                 return NULL;
     374             :         }
     375             : 
     376             :         /*
      377             :          * Only the value SPDK_NVME_QPRIO_URGENT (0) is valid for the
      378             :          * default round-robin arbitration method.
     379             :          */
     380          20 :         if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts->qprio != SPDK_NVME_QPRIO_URGENT)) {
     381           3 :                 NVME_CTRLR_ERRLOG(ctrlr, "invalid queue priority for default round robin arbitration method\n");
     382           3 :                 nvme_ctrlr_unlock(ctrlr);
     383           3 :                 return NULL;
     384             :         }
     385             : 
     386          17 :         qid = spdk_nvme_ctrlr_alloc_qid(ctrlr);
     387          17 :         if (qid < 0) {
     388           2 :                 nvme_ctrlr_unlock(ctrlr);
     389           2 :                 return NULL;
     390             :         }
     391             : 
     392          15 :         qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, opts);
     393          15 :         if (qpair == NULL) {
     394           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_transport_ctrlr_create_io_qpair() failed\n");
     395           0 :                 spdk_nvme_ctrlr_free_qid(ctrlr, qid);
     396           0 :                 nvme_ctrlr_unlock(ctrlr);
     397           0 :                 return NULL;
     398             :         }
     399             : 
     400          15 :         TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
     401             : 
     402          15 :         nvme_ctrlr_proc_add_io_qpair(qpair);
     403             : 
     404          15 :         nvme_ctrlr_unlock(ctrlr);
     405             : 
     406          15 :         return qpair;
     407             : }
     408             : 
     409             : int
     410          15 : spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
     411             : {
     412             :         int rc;
     413             : 
     414          15 :         if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
     415           0 :                 return -EISCONN;
     416             :         }
     417             : 
     418          15 :         nvme_ctrlr_lock(ctrlr);
     419          15 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
     420          15 :         nvme_ctrlr_unlock(ctrlr);
     421             : 
     422          15 :         if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
     423           0 :                 spdk_delay_us(100);
     424             :         }
     425             : 
     426          15 :         return rc;
     427             : }
     428             : 
     429             : void
     430           0 : spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
     431             : {
     432           0 :         struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
     433             : 
     434           0 :         nvme_ctrlr_lock(ctrlr);
     435           0 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     436           0 :         nvme_ctrlr_unlock(ctrlr);
     437           0 : }
     438             : 
     439             : int
     440           0 : spdk_nvme_ctrlr_get_admin_qp_fd(struct spdk_nvme_ctrlr *ctrlr,
     441             :                                 struct spdk_event_handler_opts *opts)
     442             : {
     443           0 :         return spdk_nvme_qpair_get_fd(ctrlr->adminq, opts);
     444             : }
     445             : 
     446             : struct spdk_nvme_qpair *
     447          23 : spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
     448             :                                const struct spdk_nvme_io_qpair_opts *user_opts,
     449             :                                size_t opts_size)
     450             : {
     451             : 
     452          23 :         struct spdk_nvme_qpair          *qpair = NULL;
     453          23 :         struct spdk_nvme_io_qpair_opts  opts;
     454             :         int                             rc;
     455             : 
     456          23 :         nvme_ctrlr_lock(ctrlr);
     457             : 
     458          23 :         if (spdk_unlikely(ctrlr->state != NVME_CTRLR_STATE_READY)) {
      459             :                 /* When the controller is resetting or initializing, free_io_qids has been
      460             :                  * deleted or not yet created, so we can't create an I/O qpair in that case. */
     461           1 :                 goto unlock;
     462             :         }
     463             : 
     464             :         /*
     465             :          * Get the default options, then overwrite them with the user-provided options
     466             :          * up to opts_size.
     467             :          *
     468             :          * This allows for extensions of the opts structure without breaking
     469             :          * ABI compatibility.
     470             :          */
     471          22 :         spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
     472          22 :         if (user_opts) {
     473          18 :                 nvme_ctrlr_io_qpair_opts_copy(&opts, user_opts, spdk_min(opts.opts_size, opts_size));
     474             : 
     475             :                 /* If user passes buffers, make sure they're big enough for the requested queue size */
     476          18 :                 if (opts.sq.vaddr) {
     477           0 :                         if (opts.sq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cmd))) {
     478           0 :                                 NVME_CTRLR_ERRLOG(ctrlr, "sq buffer size %" PRIx64 " is too small for sq size %zx\n",
     479             :                                                   opts.sq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cmd)));
     480           0 :                                 goto unlock;
     481             :                         }
     482             :                 }
     483          18 :                 if (opts.cq.vaddr) {
     484           0 :                         if (opts.cq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cpl))) {
     485           0 :                                 NVME_CTRLR_ERRLOG(ctrlr, "cq buffer size %" PRIx64 " is too small for cq size %zx\n",
     486             :                                                   opts.cq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cpl)));
     487           0 :                                 goto unlock;
     488             :                         }
     489             :                 }
     490             :         }
     491             : 
     492          22 :         if (ctrlr->opts.enable_interrupts && opts.delay_cmd_submit) {
     493           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "delay command submit cannot work with interrupts\n");
     494           0 :                 goto unlock;
     495             :         }
     496             : 
     497          22 :         qpair = nvme_ctrlr_create_io_qpair(ctrlr, &opts);
     498             : 
     499          22 :         if (qpair == NULL || opts.create_only == true) {
     500           7 :                 goto unlock;
     501             :         }
     502             : 
     503          15 :         rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
     504          15 :         if (rc != 0) {
     505           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_transport_ctrlr_connect_io_qpair() failed\n");
     506           1 :                 nvme_ctrlr_proc_remove_io_qpair(qpair);
     507           1 :                 TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
     508           1 :                 spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
     509           1 :                 nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
     510           1 :                 qpair = NULL;
     511           1 :                 goto unlock;
     512             :         }
     513             : 
     514          23 : unlock:
     515          23 :         nvme_ctrlr_unlock(ctrlr);
     516             : 
     517          23 :         return qpair;
     518             : }
     519             : 
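A minimal allocation sketch following the flow above (hypothetical caller;
assumes ctrlr is in the READY state). create_only defers the connect so the
caller can perform it explicitly via spdk_nvme_ctrlr_connect_io_qpair():

        struct spdk_nvme_io_qpair_opts qopts;
        struct spdk_nvme_qpair *qpair;

        spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &qopts, sizeof(qopts));
        qopts.io_queue_size = 256;   /* example override */
        qopts.create_only = true;    /* allocate now, connect explicitly */

        qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &qopts, sizeof(qopts));
        if (qpair && spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair) != 0) {
                spdk_nvme_ctrlr_free_io_qpair(qpair);
                qpair = NULL;
        }
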
     520             : int
     521           8 : spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
     522             : {
     523             :         struct spdk_nvme_ctrlr *ctrlr;
     524             :         enum nvme_qpair_state qpair_state;
     525             :         int rc;
     526             : 
     527           8 :         assert(qpair != NULL);
     528           8 :         assert(nvme_qpair_is_admin_queue(qpair) == false);
     529           8 :         assert(qpair->ctrlr != NULL);
     530             : 
     531           8 :         ctrlr = qpair->ctrlr;
     532           8 :         nvme_ctrlr_lock(ctrlr);
     533           8 :         qpair_state = nvme_qpair_get_state(qpair);
     534             : 
     535           8 :         if (ctrlr->is_removed) {
     536           2 :                 rc = -ENODEV;
     537           2 :                 goto out;
     538             :         }
     539             : 
     540           6 :         if (ctrlr->is_resetting || qpair_state == NVME_QPAIR_DISCONNECTING) {
     541           2 :                 rc = -EAGAIN;
     542           2 :                 goto out;
     543             :         }
     544             : 
     545           4 :         if (ctrlr->is_failed || qpair_state == NVME_QPAIR_DESTROYING) {
     546           2 :                 rc = -ENXIO;
     547           2 :                 goto out;
     548             :         }
     549             : 
     550           2 :         if (qpair_state != NVME_QPAIR_DISCONNECTED) {
     551           1 :                 rc = 0;
     552           1 :                 goto out;
     553             :         }
     554             : 
     555           1 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
     556           1 :         if (rc) {
     557           0 :                 rc = -EAGAIN;
     558           0 :                 goto out;
     559             :         }
     560             : 
     561           1 : out:
     562           8 :         nvme_ctrlr_unlock(ctrlr);
     563           8 :         return rc;
     564             : }
     565             : 
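The return codes above separate transient from fatal conditions: -EAGAIN
(controller resetting or qpair disconnecting) means try again later, while
-ENODEV (controller removed) and -ENXIO (controller failed or qpair being
destroyed) mean give up. A hypothetical caller-side pattern:

        int rc = spdk_nvme_ctrlr_reconnect_io_qpair(qpair);

        switch (rc) {
        case 0:          /* connected, or was never disconnected */
                break;
        case -EAGAIN:    /* transient: poll and retry later */
                break;
        default:         /* -ENODEV / -ENXIO: tear the qpair down */
                spdk_nvme_ctrlr_free_io_qpair(qpair);
                break;
        }
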
     566             : spdk_nvme_qp_failure_reason
     567           0 : spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
     568             : {
     569           0 :         return ctrlr->adminq->transport_failure_reason;
     570             : }
     571             : 
     572             : /*
     573             :  * This internal function will attempt to take the controller
     574             :  * lock before calling disconnect on a controller qpair.
     575             :  * Functions already holding the controller lock should
     576             :  * call nvme_transport_ctrlr_disconnect_qpair directly.
     577             :  */
     578             : void
     579           0 : nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair)
     580             : {
     581           0 :         struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
     582             : 
     583           0 :         assert(ctrlr != NULL);
     584           0 :         nvme_ctrlr_lock(ctrlr);
     585           0 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     586           0 :         nvme_ctrlr_unlock(ctrlr);
     587           0 : }
     588             : 
     589             : int
     590          14 : spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
     591             : {
     592             :         struct spdk_nvme_ctrlr *ctrlr;
     593             : 
     594          14 :         if (qpair == NULL) {
     595           0 :                 return 0;
     596             :         }
     597             : 
     598          14 :         ctrlr = qpair->ctrlr;
     599             : 
     600          14 :         if (qpair->in_completion_context) {
     601             :                 /*
     602             :                  * There are many cases where it is convenient to delete an io qpair in the context
     603             :                  *  of that qpair's completion routine.  To handle this properly, set a flag here
     604             :                  *  so that the completion routine will perform an actual delete after the context
     605             :                  *  unwinds.
     606             :                  */
     607           0 :                 qpair->delete_after_completion_context = 1;
     608           0 :                 return 0;
     609             :         }
     610             : 
     611          14 :         if (qpair->auth.cb_fn != NULL) {
     612           0 :                 qpair->auth.cb_fn(qpair->auth.cb_ctx, -ECANCELED);
     613           0 :                 qpair->auth.cb_fn = NULL;
     614             :         }
     615             : 
     616          14 :         qpair->destroy_in_progress = 1;
     617             : 
     618          14 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     619             : 
     620          14 :         if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
     621           0 :                 spdk_nvme_poll_group_remove(qpair->poll_group->group, qpair);
     622             :         }
     623             : 
     624             :         /* Do not retry. */
     625          14 :         nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING);
     626             : 
     627             :         /* In the multi-process case, a process may call this function on a foreign
      628             :          * I/O qpair (i.e. one that this process did not create) when that qpair's process
     629             :          * exits unexpectedly.  In that case, we must not try to abort any reqs associated
     630             :          * with that qpair, since the callbacks will also be foreign to this process.
     631             :          */
     632          14 :         if (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr)) {
     633          14 :                 nvme_qpair_abort_all_queued_reqs(qpair);
     634             :         }
     635             : 
     636          14 :         nvme_ctrlr_lock(ctrlr);
     637             : 
     638          14 :         nvme_ctrlr_proc_remove_io_qpair(qpair);
     639             : 
     640          14 :         TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
     641          14 :         spdk_nvme_ctrlr_free_qid(ctrlr, qpair->id);
     642             : 
     643          14 :         nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
     644          14 :         nvme_ctrlr_unlock(ctrlr);
     645          14 :         return 0;
     646             : }
     647             : 
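Because of the delete_after_completion_context flag above, it is safe to call
spdk_nvme_ctrlr_free_io_qpair() from within that qpair's own completion
callback; the function returns immediately and the real teardown happens once
the completion context unwinds. A hypothetical callback (the qpair is assumed
to have been passed as the callback argument):

        static void
        last_io_done(void *arg, const struct spdk_nvme_cpl *cpl)
        {
                struct spdk_nvme_qpair *qpair = arg;

                if (spdk_nvme_cpl_is_error(cpl)) {
                        /* ... error handling ... */
                }
                /* Deferred: the actual delete occurs after this callback returns. */
                spdk_nvme_ctrlr_free_io_qpair(qpair);
        }
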
     648             : static void
     649           3 : nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
     650             :                 struct spdk_nvme_intel_log_page_directory *log_page_directory)
     651             : {
     652           3 :         if (log_page_directory == NULL) {
     653           0 :                 return;
     654             :         }
     655             : 
     656           3 :         assert(ctrlr->cdata.vid == SPDK_PCI_VID_INTEL);
     657             : 
     658           3 :         ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;
     659             : 
     660           3 :         if (log_page_directory->read_latency_log_len ||
     661           2 :             (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
     662           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
     663             :         }
     664           3 :         if (log_page_directory->write_latency_log_len ||
     665           2 :             (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
     666           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
     667             :         }
     668           3 :         if (log_page_directory->temperature_statistics_log_len) {
     669           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
     670             :         }
     671           3 :         if (log_page_directory->smart_log_len) {
     672           1 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
     673             :         }
     674           3 :         if (log_page_directory->marketing_description_log_len) {
     675           1 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
     676             :         }
     677             : }
     678             : 
     679             : struct intel_log_pages_ctx {
     680             :         struct spdk_nvme_intel_log_page_directory log_page_directory;
     681             :         struct spdk_nvme_ctrlr *ctrlr;
     682             : };
     683             : 
     684             : static void
     685           1 : nvme_ctrlr_set_intel_support_log_pages_done(void *arg, const struct spdk_nvme_cpl *cpl)
     686             : {
     687           1 :         struct intel_log_pages_ctx *ctx = arg;
     688           1 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
     689             : 
     690           1 :         if (!spdk_nvme_cpl_is_error(cpl)) {
     691           1 :                 nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, &ctx->log_page_directory);
     692             :         }
     693             : 
     694           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     695           1 :                              ctrlr->opts.admin_timeout_ms);
     696           1 :         free(ctx);
     697           1 : }
     698             : 
     699             : static int
     700           1 : nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
     701             : {
     702           1 :         int rc = 0;
     703             :         struct intel_log_pages_ctx *ctx;
     704             : 
     705           1 :         ctx = calloc(1, sizeof(*ctx));
     706           1 :         if (!ctx) {
     707           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     708           0 :                                      ctrlr->opts.admin_timeout_ms);
     709           0 :                 return 0;
     710             :         }
     711             : 
     712           1 :         ctx->ctrlr = ctrlr;
     713             : 
     714           1 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
     715           1 :                                               SPDK_NVME_GLOBAL_NS_TAG, &ctx->log_page_directory,
     716             :                                               sizeof(struct spdk_nvme_intel_log_page_directory),
     717             :                                               0, nvme_ctrlr_set_intel_support_log_pages_done, ctx);
     718           1 :         if (rc != 0) {
     719           0 :                 free(ctx);
     720           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     721           0 :                                      ctrlr->opts.admin_timeout_ms);
     722           0 :                 return 0;
     723             :         }
     724             : 
     725           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES,
     726           1 :                              ctrlr->opts.admin_timeout_ms);
     727             : 
     728           1 :         return 0;
     729             : }
     730             : 
     731             : static int
     732           4 : nvme_ctrlr_alloc_ana_log_page(struct spdk_nvme_ctrlr *ctrlr)
     733             : {
     734             :         uint32_t ana_log_page_size;
     735             : 
     736           4 :         ana_log_page_size = sizeof(struct spdk_nvme_ana_page) + ctrlr->cdata.nanagrpid *
     737           4 :                             sizeof(struct spdk_nvme_ana_group_descriptor) + ctrlr->active_ns_count *
     738             :                             sizeof(uint32_t);
     739             : 
      740             :         /* The number of active namespaces may have changed.
      741             :          * Check whether the ANA log page still fits into the existing buffer.
     742             :          */
     743           4 :         if (ana_log_page_size > ctrlr->ana_log_page_size) {
     744             :                 void *new_buffer;
     745             : 
     746           4 :                 if (ctrlr->ana_log_page) {
     747           1 :                         new_buffer = realloc(ctrlr->ana_log_page, ana_log_page_size);
     748             :                 } else {
     749           3 :                         new_buffer = calloc(1, ana_log_page_size);
     750             :                 }
     751             : 
     752           4 :                 if (!new_buffer) {
     753           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "could not allocate ANA log page buffer, size %u\n",
     754             :                                           ana_log_page_size);
     755           0 :                         return -ENXIO;
     756             :                 }
     757             : 
     758           4 :                 ctrlr->ana_log_page = new_buffer;
     759           4 :                 if (ctrlr->copied_ana_desc) {
     760           1 :                         new_buffer = realloc(ctrlr->copied_ana_desc, ana_log_page_size);
     761             :                 } else {
     762           3 :                         new_buffer = calloc(1, ana_log_page_size);
     763             :                 }
     764             : 
     765           4 :                 if (!new_buffer) {
     766           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "could not allocate a buffer to parse ANA descriptor, size %u\n",
     767             :                                           ana_log_page_size);
     768           0 :                         return -ENOMEM;
     769             :                 }
     770             : 
     771           4 :                 ctrlr->copied_ana_desc = new_buffer;
     772           4 :                 ctrlr->ana_log_page_size = ana_log_page_size;
     773             :         }
     774             : 
     775           4 :         return 0;
     776             : }
     777             : 
     778             : static int
     779           4 : nvme_ctrlr_update_ana_log_page(struct spdk_nvme_ctrlr *ctrlr)
     780             : {
     781             :         struct nvme_completion_poll_status *status;
     782             :         int rc;
     783             : 
     784           4 :         rc = nvme_ctrlr_alloc_ana_log_page(ctrlr);
     785           4 :         if (rc != 0) {
     786           0 :                 return rc;
     787             :         }
     788             : 
     789           4 :         status = calloc(1, sizeof(*status));
     790           4 :         if (status == NULL) {
     791           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
     792           0 :                 return -ENOMEM;
     793             :         }
     794             : 
     795           4 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS,
     796           4 :                                               SPDK_NVME_GLOBAL_NS_TAG, ctrlr->ana_log_page,
     797             :                                               ctrlr->ana_log_page_size, 0,
     798             :                                               nvme_completion_poll_cb, status);
     799           4 :         if (rc != 0) {
     800           0 :                 free(status);
     801           0 :                 return rc;
     802             :         }
     803             : 
     804           4 :         if (nvme_wait_for_completion_robust_lock_timeout(ctrlr->adminq, status, &ctrlr->ctrlr_lock,
     805           4 :                         ctrlr->opts.admin_timeout_ms * 1000)) {
     806           0 :                 if (!status->timed_out) {
     807           0 :                         free(status);
     808             :                 }
     809           0 :                 return -EIO;
     810             :         }
     811             : 
     812           4 :         free(status);
     813           4 :         return 0;
     814             : }
     815             : 
     816             : static int
     817           5 : nvme_ctrlr_update_ns_ana_states(const struct spdk_nvme_ana_group_descriptor *desc,
     818             :                                 void *cb_arg)
     819             : {
     820           5 :         struct spdk_nvme_ctrlr *ctrlr = cb_arg;
     821             :         struct spdk_nvme_ns *ns;
     822             :         uint32_t i, nsid;
     823             : 
     824          14 :         for (i = 0; i < desc->num_of_nsid; i++) {
     825           9 :                 nsid = desc->nsid[i];
     826           9 :                 if (nsid == 0 || nsid > ctrlr->cdata.nn) {
     827           0 :                         continue;
     828             :                 }
     829             : 
     830           9 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
     831           9 :                 assert(ns != NULL);
     832             : 
     833           9 :                 ns->ana_group_id = desc->ana_group_id;
     834           9 :                 ns->ana_state = desc->ana_state;
     835             :         }
     836             : 
     837           5 :         return 0;
     838             : }
     839             : 
     840             : int
     841           4 : nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
     842             :                               spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg)
     843             : {
     844             :         struct spdk_nvme_ana_group_descriptor *copied_desc;
     845             :         uint8_t *orig_desc;
     846             :         uint32_t i, desc_size, copy_len;
     847           4 :         int rc = 0;
     848             : 
     849           4 :         if (ctrlr->ana_log_page == NULL) {
     850           0 :                 return -EINVAL;
     851             :         }
     852             : 
     853           4 :         copied_desc = ctrlr->copied_ana_desc;
     854             : 
     855           4 :         orig_desc = (uint8_t *)ctrlr->ana_log_page + sizeof(struct spdk_nvme_ana_page);
     856           4 :         copy_len = ctrlr->ana_log_page_size - sizeof(struct spdk_nvme_ana_page);
     857             : 
     858           9 :         for (i = 0; i < ctrlr->ana_log_page->num_ana_group_desc; i++) {
     859           5 :                 memcpy(copied_desc, orig_desc, copy_len);
     860             : 
     861           5 :                 rc = cb_fn(copied_desc, cb_arg);
     862           5 :                 if (rc != 0) {
     863           0 :                         break;
     864             :                 }
     865             : 
     866           5 :                 desc_size = sizeof(struct spdk_nvme_ana_group_descriptor) +
     867           5 :                             copied_desc->num_of_nsid * sizeof(uint32_t);
     868           5 :                 orig_desc += desc_size;
     869           5 :                 copy_len -= desc_size;
     870             :         }
     871             : 
     872           4 :         return rc;
     873             : }
     874             : 
     875             : static int
     876          16 : nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
     877             : {
     878          16 :         int     rc = 0;
     879             : 
     880          16 :         memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
     881             :         /* Mandatory pages */
     882          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
     883          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
     884          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
     885          16 :         if (ctrlr->cdata.lpa.celp) {
     886           1 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
     887             :         }
     888             : 
     889          16 :         if (ctrlr->cdata.cmic.ana_reporting) {
     890           2 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS] = true;
     891           2 :                 if (!ctrlr->opts.disable_read_ana_log_page) {
     892           2 :                         rc = nvme_ctrlr_update_ana_log_page(ctrlr);
     893           2 :                         if (rc == 0) {
     894           2 :                                 nvme_ctrlr_parse_ana_log_page(ctrlr, nvme_ctrlr_update_ns_ana_states,
     895             :                                                               ctrlr);
     896             :                         }
     897             :                 }
     898             :         }
     899             : 
     900          16 :         if (ctrlr->cdata.ctratt.bits.fdps) {
     901           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_CONFIGURATIONS] = true;
     902           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_RECLAIM_UNIT_HANDLE_USAGE] = true;
     903           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_STATISTICS] = true;
     904           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_EVENTS] = true;
     905             :         }
     906             : 
     907          16 :         if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL &&
     908           1 :             ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE &&
     909           1 :             !(ctrlr->quirks & NVME_INTEL_QUIRK_NO_LOG_PAGES)) {
     910           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES,
     911           1 :                                      ctrlr->opts.admin_timeout_ms);
     912             : 
     913             :         } else {
     914          15 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     915          15 :                                      ctrlr->opts.admin_timeout_ms);
     916             : 
     917             :         }
     918             : 
     919          16 :         return rc;
     920             : }
     921             : 
     922             : static void
     923           1 : nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
     924             : {
     925           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
     926           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
     927           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
     928           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
     929           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
     930           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
     931           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
     932           1 : }
     933             : 
     934             : static void
     935          18 : nvme_ctrlr_set_arbitration_feature(struct spdk_nvme_ctrlr *ctrlr)
     936             : {
     937             :         uint32_t cdw11;
     938             :         struct nvme_completion_poll_status *status;
     939             : 
     940          18 :         if (ctrlr->opts.arbitration_burst == 0) {
     941          16 :                 return;
     942             :         }
     943             : 
     944           2 :         if (ctrlr->opts.arbitration_burst > 7) {
     945           1 :                 NVME_CTRLR_WARNLOG(ctrlr, "Valid arbitration burst values is from 0-7\n");
     946           1 :                 return;
     947             :         }
     948             : 
     949           1 :         status = calloc(1, sizeof(*status));
     950           1 :         if (!status) {
     951           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
     952           0 :                 return;
     953             :         }
     954             : 
     955           1 :         cdw11 = ctrlr->opts.arbitration_burst;
     956             : 
     957           1 :         if (spdk_nvme_ctrlr_get_flags(ctrlr) & SPDK_NVME_CTRLR_WRR_SUPPORTED) {
     958           1 :                 cdw11 |= (uint32_t)ctrlr->opts.low_priority_weight << 8;
     959           1 :                 cdw11 |= (uint32_t)ctrlr->opts.medium_priority_weight << 16;
     960           1 :                 cdw11 |= (uint32_t)ctrlr->opts.high_priority_weight << 24;
     961             :         }
     962             : 
     963           1 :         if (spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ARBITRATION,
     964             :                                             cdw11, 0, NULL, 0,
     965             :                                             nvme_completion_poll_cb, status) < 0) {
     966           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set arbitration feature failed\n");
     967           0 :                 free(status);
     968           0 :                 return;
     969             :         }
     970             : 
     971           1 :         if (nvme_wait_for_completion_timeout(ctrlr->adminq, status,
     972           1 :                                              ctrlr->opts.admin_timeout_ms * 1000)) {
      973           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Timed out setting the arbitration feature\n");
     974             :         }
     975             : 
     976           1 :         if (!status->timed_out) {
     977           1 :                 free(status);
     978             :         }
     979             : }
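
For reference, the CDW11 value built above follows the NVMe Arbitration feature (Feature ID 01h): Arbitration Burst in bits 2:0, and the low/medium/high priority weights in bits 15:8, 23:16, and 31:24 when weighted round robin is supported. Below is a minimal standalone sketch of the same packing; it is illustrative only and not part of the driver.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: pack the Arbitration feature CDW11 the same way
     * nvme_ctrlr_set_arbitration_feature() does above.
     */
    static uint32_t
    pack_arbitration_cdw11(uint8_t ab, uint8_t lpw, uint8_t mpw, uint8_t hpw)
    {
            uint32_t cdw11 = ab;            /* bits 2:0; caller keeps it <= 7 */

            cdw11 |= (uint32_t)lpw << 8;    /* low priority weight */
            cdw11 |= (uint32_t)mpw << 16;   /* medium priority weight */
            cdw11 |= (uint32_t)hpw << 24;   /* high priority weight */
            return cdw11;
    }

    int
    main(void)
    {
            /* Example values only. */
            printf("cdw11 = 0x%08" PRIx32 "\n", pack_arbitration_cdw11(4, 8, 16, 32));
            return 0;
    }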
     980             : 
     981             : static void
     982          16 : nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
     983             : {
     984          16 :         memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
     985             :         /* Mandatory features */
     986          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
     987          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
     988          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
     989          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
     990          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
     991          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
     992          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
     993          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
     994          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
     995             :         /* Optional features */
     996          16 :         if (ctrlr->cdata.vwc.present) {
     997           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
     998             :         }
     999          16 :         if (ctrlr->cdata.apsta.supported) {
    1000           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
    1001             :         }
    1002          16 :         if (ctrlr->cdata.hmpre) {
    1003           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
    1004             :         }
    1005          16 :         if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
    1006           1 :                 nvme_ctrlr_set_intel_supported_features(ctrlr);
    1007             :         }
    1008             : 
    1009          16 :         nvme_ctrlr_set_arbitration_feature(ctrlr);
    1010          16 : }
    1011             : 
    1012             : static void
    1013           1 : nvme_ctrlr_set_host_feature_done(void *arg, const struct spdk_nvme_cpl *cpl)
    1014             : {
    1015           1 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    1016             : 
    1017           1 :         spdk_free(ctrlr->tmp_ptr);
    1018           1 :         ctrlr->tmp_ptr = NULL;
    1019             : 
    1020           1 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1021           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set host behavior support feature failed: SC %x SCT %x\n",
    1022             :                                   cpl->status.sc, cpl->status.sct);
    1023           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1024           0 :                 return;
    1025             :         }
    1026             : 
    1027           1 :         ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT] = true;
    1028             : 
    1029           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
    1030           1 :                              ctrlr->opts.admin_timeout_ms);
    1031             : }
    1032             : 
     1033             : /* We no longer want to perform this as a synchronous operation.
     1034             :  * Instead, the Host Behavior Support feature is set asynchronously across different states.
    1035             :  */
    1036             : static int
    1037          16 : nvme_ctrlr_set_host_feature(struct spdk_nvme_ctrlr *ctrlr)
    1038             : {
    1039             :         struct spdk_nvme_host_behavior *host;
    1040             :         int rc;
    1041             : 
    1042          16 :         if (!ctrlr->cdata.ctratt.bits.elbas) {
    1043          15 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
    1044          15 :                                      ctrlr->opts.admin_timeout_ms);
    1045          15 :                 return 0;
    1046             :         }
    1047             : 
    1048           1 :         ctrlr->tmp_ptr = spdk_dma_zmalloc(sizeof(struct spdk_nvme_host_behavior), 4096, NULL);
    1049           1 :         if (!ctrlr->tmp_ptr) {
    1050           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate host behavior support data\n");
    1051           0 :                 rc = -ENOMEM;
    1052           0 :                 goto error;
    1053             :         }
    1054             : 
    1055           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE,
    1056           1 :                              ctrlr->opts.admin_timeout_ms);
    1057             : 
    1058           1 :         host = ctrlr->tmp_ptr;
    1059             : 
    1060           1 :         host->lbafee = 1;
    1061             : 
    1062           1 :         rc = spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT,
    1063             :                                              0, 0, host, sizeof(struct spdk_nvme_host_behavior),
    1064             :                                              nvme_ctrlr_set_host_feature_done, ctrlr);
    1065           1 :         if (rc != 0) {
    1066           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set host behavior support feature failed: %d\n", rc);
    1067           0 :                 goto error;
    1068             :         }
    1069             : 
    1070           1 :         return 0;
    1071             : 
    1072           0 : error:
    1073           0 :         spdk_free(ctrlr->tmp_ptr);
    1074           0 :         ctrlr->tmp_ptr = NULL;
    1075             : 
    1076           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1077           0 :         return rc;
    1078             : }
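
The function above illustrates the pattern this file uses for admin commands: enter the WAIT_FOR_* state before submitting, then let the completion callback advance (or fail) the state machine. A toy sketch of that pattern follows; the names are hypothetical, not the SPDK API.

    #include <stdio.h>

    /* Hypothetical miniature of the async state machine: the submit path
     * enters a wait state first; the completion callback picks what's next.
     */
    enum step { STEP_SET_FEATURE, STEP_WAIT_FOR_FEATURE, STEP_NEXT, STEP_ERROR };

    struct machine { enum step state; };

    static void
    on_complete(struct machine *m, int status)
    {
            /* The completion decides the next state, mirroring
             * nvme_ctrlr_set_host_feature_done() above. */
            m->state = (status == 0) ? STEP_NEXT : STEP_ERROR;
    }

    int
    main(void)
    {
            struct machine m = { STEP_SET_FEATURE };

            m.state = STEP_WAIT_FOR_FEATURE;  /* set the wait state, then submit */
            on_complete(&m, 0);               /* the completion advances it */
            printf("final state = %d\n", (int)m.state);
            return 0;
    }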
    1079             : 
    1080             : bool
    1081           0 : spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
    1082             : {
    1083           0 :         return ctrlr->is_failed;
    1084             : }
    1085             : 
    1086             : void
    1087           1 : nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
    1088             : {
    1089             :         /*
     1090             :          * Set the flag here and leave the work of failing the qpairs to
    1091             :          * spdk_nvme_qpair_process_completions().
    1092             :          */
    1093           1 :         if (hot_remove) {
    1094           0 :                 ctrlr->is_removed = true;
    1095             :         }
    1096             : 
    1097           1 :         if (ctrlr->is_failed) {
    1098           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "already in failed state\n");
    1099           0 :                 return;
    1100             :         }
    1101             : 
    1102           1 :         if (ctrlr->is_disconnecting) {
    1103           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "already disconnecting\n");
    1104           0 :                 return;
    1105             :         }
    1106             : 
    1107           1 :         ctrlr->is_failed = true;
    1108           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1109           1 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    1110           1 :         NVME_CTRLR_ERRLOG(ctrlr, "in failed state.\n");
    1111             : }
    1112             : 
    1113             : /**
    1114             :  * This public API function will try to take the controller lock.
    1115             :  * Any private functions being called from a thread already holding
    1116             :  * the ctrlr lock should call nvme_ctrlr_fail directly.
    1117             :  */
    1118             : void
    1119           0 : spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
    1120             : {
    1121           0 :         nvme_ctrlr_lock(ctrlr);
    1122           0 :         nvme_ctrlr_fail(ctrlr, false);
    1123           0 :         nvme_ctrlr_unlock(ctrlr);
    1124           0 : }
    1125             : 
    1126             : static void
    1127          39 : nvme_ctrlr_shutdown_set_cc_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1128             : {
    1129          39 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1130          39 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    1131             : 
    1132          39 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1133           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write CC.SHN\n");
    1134           0 :                 ctx->shutdown_complete = true;
    1135           0 :                 return;
    1136             :         }
    1137             : 
    1138          39 :         if (ctrlr->opts.no_shn_notification) {
    1139           0 :                 ctx->shutdown_complete = true;
    1140           0 :                 return;
    1141             :         }
    1142             : 
    1143             :         /*
     1144             :          * The NVMe specification defines RTD3E to be the time from
     1145             :          *  setting CC.SHN = 01b until the controller sets CSTS.SHST = 10b.
    1146             :          * If the device doesn't report RTD3 entry latency, or if it
    1147             :          *  reports RTD3 entry latency less than 10 seconds, pick
    1148             :          *  10 seconds as a reasonable amount of time to
    1149             :          *  wait before proceeding.
    1150             :          */
    1151          39 :         NVME_CTRLR_DEBUGLOG(ctrlr, "RTD3E = %" PRIu32 " us\n", ctrlr->cdata.rtd3e);
    1152          39 :         ctx->shutdown_timeout_ms = SPDK_CEIL_DIV(ctrlr->cdata.rtd3e, 1000);
    1153          39 :         ctx->shutdown_timeout_ms = spdk_max(ctx->shutdown_timeout_ms, 10000);
    1154          39 :         NVME_CTRLR_DEBUGLOG(ctrlr, "shutdown timeout = %" PRIu32 " ms\n", ctx->shutdown_timeout_ms);
    1155             : 
    1156          39 :         ctx->shutdown_start_tsc = spdk_get_ticks();
    1157          39 :         ctx->state = NVME_CTRLR_DETACH_CHECK_CSTS;
    1158             : }
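
A worked example of the timeout derived above, as a standalone sketch with illustrative values: RTD3E is reported in microseconds, rounded up to milliseconds, then clamped to a 10 second floor.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as SPDK_CEIL_DIV(rtd3e, 1000) followed by
     * spdk_max(..., 10000) above; the values are examples only.
     */
    int
    main(void)
    {
            uint32_t rtd3e_us = 8000000;                         /* device reports 8 s */
            uint32_t timeout_ms = (rtd3e_us + 1000 - 1) / 1000;  /* ceil to ms: 8000 */

            if (timeout_ms < 10000) {
                    timeout_ms = 10000;                          /* enforce 10 s floor */
            }
            printf("shutdown timeout = %" PRIu32 " ms\n", timeout_ms); /* 10000 */
            return 0;
    }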
    1159             : 
    1160             : static void
    1161          39 : nvme_ctrlr_shutdown_get_cc_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1162             : {
    1163          39 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1164          39 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    1165             :         union spdk_nvme_cc_register cc;
    1166             :         int rc;
    1167             : 
    1168          39 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1169           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    1170           0 :                 ctx->shutdown_complete = true;
    1171           0 :                 return;
    1172             :         }
    1173             : 
    1174          39 :         assert(value <= UINT32_MAX);
    1175          39 :         cc.raw = (uint32_t)value;
    1176             : 
    1177          39 :         if (ctrlr->opts.no_shn_notification) {
    1178           0 :                 NVME_CTRLR_INFOLOG(ctrlr, "Disable SSD without shutdown notification\n");
    1179           0 :                 if (cc.bits.en == 0) {
    1180           0 :                         ctx->shutdown_complete = true;
    1181           0 :                         return;
    1182             :                 }
    1183             : 
    1184           0 :                 cc.bits.en = 0;
    1185             :         } else {
    1186          39 :                 cc.bits.shn = SPDK_NVME_SHN_NORMAL;
    1187             :         }
    1188             : 
    1189          39 :         rc = nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_shutdown_set_cc_done, ctx);
    1190          39 :         if (rc != 0) {
    1191           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write CC.SHN\n");
    1192           0 :                 ctx->shutdown_complete = true;
    1193             :         }
    1194             : }
    1195             : 
    1196             : static void
    1197          47 : nvme_ctrlr_shutdown_async(struct spdk_nvme_ctrlr *ctrlr,
    1198             :                           struct nvme_ctrlr_detach_ctx *ctx)
    1199             : {
    1200             :         int rc;
    1201             : 
    1202          47 :         if (ctrlr->is_removed) {
    1203           0 :                 ctx->shutdown_complete = true;
    1204           0 :                 return;
    1205             :         }
    1206             : 
    1207          47 :         if (ctrlr->adminq == NULL ||
    1208          40 :             ctrlr->adminq->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
    1209           8 :                 NVME_CTRLR_INFOLOG(ctrlr, "Adminq is not connected.\n");
    1210           8 :                 ctx->shutdown_complete = true;
    1211           8 :                 return;
    1212             :         }
    1213             : 
    1214          39 :         ctx->state = NVME_CTRLR_DETACH_SET_CC;
    1215          39 :         rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_shutdown_get_cc_done, ctx);
    1216          39 :         if (rc != 0) {
    1217           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    1218           0 :                 ctx->shutdown_complete = true;
    1219             :         }
    1220             : }
    1221             : 
    1222             : static void
    1223          39 : nvme_ctrlr_shutdown_get_csts_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1224             : {
    1225          39 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1226             : 
    1227          39 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1228           0 :                 NVME_CTRLR_ERRLOG(ctx->ctrlr, "Failed to read the CSTS register\n");
    1229           0 :                 ctx->shutdown_complete = true;
    1230           0 :                 return;
    1231             :         }
    1232             : 
    1233          39 :         assert(value <= UINT32_MAX);
    1234          39 :         ctx->csts.raw = (uint32_t)value;
    1235          39 :         ctx->state = NVME_CTRLR_DETACH_GET_CSTS_DONE;
    1236             : }
    1237             : 
    1238             : static int
    1239          78 : nvme_ctrlr_shutdown_poll_async(struct spdk_nvme_ctrlr *ctrlr,
    1240             :                                struct nvme_ctrlr_detach_ctx *ctx)
    1241             : {
    1242             :         union spdk_nvme_csts_register   csts;
    1243             :         uint32_t                        ms_waited;
    1244             : 
    1245          78 :         switch (ctx->state) {
    1246           0 :         case NVME_CTRLR_DETACH_SET_CC:
    1247             :         case NVME_CTRLR_DETACH_GET_CSTS:
    1248             :                 /* We're still waiting for the register operation to complete */
    1249           0 :                 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    1250           0 :                 return -EAGAIN;
    1251             : 
    1252          39 :         case NVME_CTRLR_DETACH_CHECK_CSTS:
    1253          39 :                 ctx->state = NVME_CTRLR_DETACH_GET_CSTS;
    1254          39 :                 if (nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_shutdown_get_csts_done, ctx)) {
    1255           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    1256           0 :                         return -EIO;
    1257             :                 }
    1258          39 :                 return -EAGAIN;
    1259             : 
    1260          39 :         case NVME_CTRLR_DETACH_GET_CSTS_DONE:
    1261          39 :                 ctx->state = NVME_CTRLR_DETACH_CHECK_CSTS;
    1262          39 :                 break;
    1263             : 
    1264           0 :         default:
    1265           0 :                 assert(0 && "Should never happen");
    1266             :                 return -EINVAL;
    1267             :         }
    1268             : 
    1269          39 :         ms_waited = (spdk_get_ticks() - ctx->shutdown_start_tsc) * 1000 / spdk_get_ticks_hz();
    1270          39 :         csts.raw = ctx->csts.raw;
    1271             : 
    1272          39 :         if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
    1273          39 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "shutdown complete in %u milliseconds\n", ms_waited);
    1274          39 :                 return 0;
    1275             :         }
    1276             : 
    1277           0 :         if (ms_waited < ctx->shutdown_timeout_ms) {
    1278           0 :                 return -EAGAIN;
    1279             :         }
    1280             : 
    1281           0 :         NVME_CTRLR_ERRLOG(ctrlr, "did not shutdown within %u milliseconds\n",
    1282             :                           ctx->shutdown_timeout_ms);
    1283           0 :         if (ctrlr->quirks & NVME_QUIRK_SHST_COMPLETE) {
    1284           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "likely due to shutdown handling in the VMWare emulated NVMe SSD\n");
    1285             :         }
    1286             : 
    1287           0 :         return 0;
    1288             : }
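
The poller above converts the elapsed TSC delta to milliseconds before comparing it against the shutdown timeout. A standalone sketch of that conversion, with an example tick rate rather than a real spdk_get_ticks_hz() value:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint64_t ticks_hz = 2400000000ULL;   /* example: 2.4 GHz tick rate */
            uint64_t start = 0;
            uint64_t now = 26400000000ULL;       /* 11 s worth of ticks */
            uint64_t ms_waited = (now - start) * 1000 / ticks_hz;

            /* 11000 ms exceeds the 10000 ms default, so the real poller
             * would log the "did not shutdown" error at this point. */
            printf("waited %" PRIu64 " ms\n", ms_waited);
            return 0;
    }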
    1289             : 
    1290             : static inline uint64_t
    1291         509 : nvme_ctrlr_get_ready_timeout(struct spdk_nvme_ctrlr *ctrlr)
    1292             : {
    1293         509 :         return ctrlr->cap.bits.to * 500;
    1294             : }
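
CAP.TO is expressed in 500 ms units, so the helper above simply scales it. For example, TO = 30 yields a 15000 ms ready timeout, as this trivial sketch shows:

    #include <stdio.h>

    int
    main(void)
    {
            unsigned cap_to = 30;                             /* example CAP.TO value */
            printf("ready timeout = %u ms\n", cap_to * 500);  /* 15000 ms */
            return 0;
    }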
    1295             : 
    1296             : static void
    1297          14 : nvme_ctrlr_set_cc_en_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1298             : {
    1299          14 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    1300             : 
    1301          14 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1302           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to set the CC register\n");
    1303           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1304           0 :                 return;
    1305             :         }
    1306             : 
    1307          14 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    1308             :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    1309             : }
    1310             : 
    1311             : static int
    1312          21 : nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
    1313             : {
    1314             :         union spdk_nvme_cc_register     cc;
    1315             :         int                             rc;
    1316             : 
    1317          21 :         rc = nvme_transport_ctrlr_enable(ctrlr);
    1318          21 :         if (rc != 0) {
    1319           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "transport ctrlr_enable failed\n");
    1320           0 :                 return rc;
    1321             :         }
    1322             : 
    1323          21 :         cc.raw = ctrlr->process_init_cc.raw;
    1324          21 :         if (cc.bits.en != 0) {
    1325           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "called with CC.EN = 1\n");
    1326           0 :                 return -EINVAL;
    1327             :         }
    1328             : 
    1329          21 :         cc.bits.en = 1;
    1330          21 :         cc.bits.css = 0;
    1331          21 :         cc.bits.shn = 0;
    1332          21 :         cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
    1333          21 :         cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
    1334             : 
    1335             :         /* Page size is 2 ^ (12 + mps). */
    1336          21 :         cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;
    1337             : 
    1338             :         /*
    1339             :          * Since NVMe 1.0, a controller should have at least one bit set in CAP.CSS.
    1340             :          * A controller that does not have any bit set in CAP.CSS is not spec compliant.
    1341             :          * Try to support such a controller regardless.
    1342             :          */
    1343          21 :         if (ctrlr->cap.bits.css == 0) {
    1344          21 :                 NVME_CTRLR_INFOLOG(ctrlr, "Drive reports no command sets supported. Assuming NVM is supported.\n");
    1345          21 :                 ctrlr->cap.bits.css = SPDK_NVME_CAP_CSS_NVM;
    1346             :         }
    1347             : 
    1348             :         /*
    1349             :          * If the user did not explicitly request a command set, or supplied a value larger than
    1350             :          * what can be saved in CC.CSS, use the most reasonable default.
    1351             :          */
    1352          21 :         if (ctrlr->opts.command_set >= CHAR_BIT) {
    1353           0 :                 if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS) {
    1354           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_IOCS;
    1355           0 :                 } else if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_NVM) {
    1356           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1357           0 :                 } else if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_NOIO) {
    1358             :                         /* Technically we should respond with CC_CSS_NOIO in
    1359             :                          * this case, but we use NVM instead to work around
    1360             :                          * buggy targets and to match Linux driver behavior.
    1361             :                          */
    1362           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1363             :                 } else {
    1364             :                         /* Invalid supported bits detected, falling back to NVM. */
    1365           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1366             :                 }
    1367             :         }
    1368             : 
    1369             :         /* Verify that the selected command set is supported by the controller. */
    1370          21 :         if (!(ctrlr->cap.bits.css & (1u << ctrlr->opts.command_set))) {
    1371           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Requested I/O command set %u but supported mask is 0x%x\n",
    1372             :                                     ctrlr->opts.command_set, ctrlr->cap.bits.css);
    1373           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Falling back to NVM. Assuming NVM is supported.\n");
    1374           0 :                 ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1375             :         }
    1376             : 
    1377          21 :         cc.bits.css = ctrlr->opts.command_set;
    1378             : 
    1379          21 :         switch (ctrlr->opts.arb_mechanism) {
    1380          10 :         case SPDK_NVME_CC_AMS_RR:
    1381          10 :                 break;
    1382           4 :         case SPDK_NVME_CC_AMS_WRR:
    1383           4 :                 if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
    1384           2 :                         break;
    1385             :                 }
    1386           2 :                 return -EINVAL;
    1387           4 :         case SPDK_NVME_CC_AMS_VS:
    1388           4 :                 if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
    1389           2 :                         break;
    1390             :                 }
    1391           2 :                 return -EINVAL;
    1392           3 :         default:
    1393           3 :                 return -EINVAL;
    1394             :         }
    1395             : 
    1396          14 :         cc.bits.ams = ctrlr->opts.arb_mechanism;
    1397          14 :         ctrlr->process_init_cc.raw = cc.raw;
    1398             : 
    1399          14 :         if (nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_set_cc_en_done, ctrlr)) {
    1400           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_cc() failed\n");
    1401           0 :                 return -EIO;
    1402             :         }
    1403             : 
    1404          14 :         return 0;
    1405             : }
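
The CC encodings chosen above are all powers of two: IOSQES/IOCQES hold log2 of the queue entry sizes and MPS holds log2(page size) minus 12. A standalone sketch of those computations, illustrative rather than driver code:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Integer log2, like the spdk_u32log2() used above (x must be nonzero). */
    static uint32_t
    u32log2(uint32_t x)
    {
            uint32_t r = 0;

            while (x >>= 1) {
                    r++;
            }
            return r;
    }

    int
    main(void)
    {
            uint32_t page_size = 4096;

            printf("iosqes = %" PRIu32 "\n", u32log2(64));             /* 64-byte SQE -> 6 */
            printf("iocqes = %" PRIu32 "\n", u32log2(16));             /* 16-byte CQE -> 4 */
            printf("mps    = %" PRIu32 "\n", u32log2(page_size) - 12); /* 4 KiB page -> 0 */
            return 0;
    }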
    1406             : 
    1407             : static const char *
    1408           1 : nvme_ctrlr_state_string(enum nvme_ctrlr_state state)
    1409             : {
    1410           1 :         switch (state) {
    1411           0 :         case NVME_CTRLR_STATE_INIT_DELAY:
    1412           0 :                 return "delay init";
    1413           0 :         case NVME_CTRLR_STATE_CONNECT_ADMINQ:
    1414           0 :                 return "connect adminq";
    1415           0 :         case NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ:
    1416           0 :                 return "wait for connect adminq";
    1417           0 :         case NVME_CTRLR_STATE_READ_VS:
    1418           0 :                 return "read vs";
    1419           0 :         case NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS:
    1420           0 :                 return "read vs wait for vs";
    1421           0 :         case NVME_CTRLR_STATE_READ_CAP:
    1422           0 :                 return "read cap";
    1423           0 :         case NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP:
    1424           0 :                 return "read cap wait for cap";
    1425           0 :         case NVME_CTRLR_STATE_CHECK_EN:
    1426           0 :                 return "check en";
    1427           0 :         case NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC:
    1428           0 :                 return "check en wait for cc";
    1429           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
    1430           0 :                 return "disable and wait for CSTS.RDY = 1";
    1431           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    1432           0 :                 return "disable and wait for CSTS.RDY = 1 reg";
    1433           0 :         case NVME_CTRLR_STATE_SET_EN_0:
    1434           0 :                 return "set CC.EN = 0";
    1435           0 :         case NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC:
    1436           0 :                 return "set CC.EN = 0 wait for cc";
    1437           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
    1438           0 :                 return "disable and wait for CSTS.RDY = 0";
    1439           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS:
    1440           0 :                 return "disable and wait for CSTS.RDY = 0 reg";
    1441           0 :         case NVME_CTRLR_STATE_DISABLED:
    1442           0 :                 return "controller is disabled";
    1443           0 :         case NVME_CTRLR_STATE_ENABLE:
    1444           0 :                 return "enable controller by writing CC.EN = 1";
    1445           0 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC:
    1446           0 :                 return "enable controller by writing CC.EN = 1 reg";
    1447           0 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
    1448           0 :                 return "wait for CSTS.RDY = 1";
    1449           0 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    1450           0 :                 return "wait for CSTS.RDY = 1 reg";
    1451           0 :         case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
    1452           0 :                 return "reset admin queue";
    1453           0 :         case NVME_CTRLR_STATE_IDENTIFY:
    1454           0 :                 return "identify controller";
    1455           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
    1456           0 :                 return "wait for identify controller";
    1457           0 :         case NVME_CTRLR_STATE_CONFIGURE_AER:
    1458           0 :                 return "configure AER";
    1459           0 :         case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
    1460           0 :                 return "wait for configure aer";
    1461           0 :         case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
    1462           0 :                 return "set keep alive timeout";
    1463           0 :         case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
    1464           0 :                 return "wait for set keep alive timeout";
    1465           0 :         case NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC:
    1466           0 :                 return "identify controller iocs specific";
    1467           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC:
    1468           0 :                 return "wait for identify controller iocs specific";
    1469           0 :         case NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG:
    1470           0 :                 return "get zns cmd and effects log page";
    1471           0 :         case NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG:
    1472           0 :                 return "wait for get zns cmd and effects log page";
    1473           0 :         case NVME_CTRLR_STATE_SET_NUM_QUEUES:
    1474           0 :                 return "set number of queues";
    1475           0 :         case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
    1476           0 :                 return "wait for set number of queues";
    1477           0 :         case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
    1478           0 :                 return "identify active ns";
    1479           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
    1480           0 :                 return "wait for identify active ns";
    1481           0 :         case NVME_CTRLR_STATE_IDENTIFY_NS:
    1482           0 :                 return "identify ns";
    1483           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
    1484           0 :                 return "wait for identify ns";
    1485           0 :         case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
    1486           0 :                 return "identify namespace id descriptors";
    1487           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
    1488           0 :                 return "wait for identify namespace id descriptors";
    1489           0 :         case NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC:
    1490           0 :                 return "identify ns iocs specific";
    1491           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC:
    1492           0 :                 return "wait for identify ns iocs specific";
    1493           0 :         case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
    1494           0 :                 return "set supported log pages";
    1495           0 :         case NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES:
    1496           0 :                 return "set supported INTEL log pages";
    1497           0 :         case NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES:
    1498           0 :                 return "wait for supported INTEL log pages";
    1499           0 :         case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
    1500           0 :                 return "set supported features";
    1501           0 :         case NVME_CTRLR_STATE_SET_HOST_FEATURE:
    1502           0 :                 return "set host behavior support feature";
    1503           0 :         case NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE:
    1504           0 :                 return "wait for set host behavior support feature";
    1505           0 :         case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
    1506           0 :                 return "set doorbell buffer config";
    1507           0 :         case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
    1508           0 :                 return "wait for doorbell buffer config";
    1509           0 :         case NVME_CTRLR_STATE_SET_HOST_ID:
    1510           0 :                 return "set host ID";
    1511           0 :         case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
    1512           0 :                 return "wait for set host ID";
    1513           0 :         case NVME_CTRLR_STATE_TRANSPORT_READY:
    1514           0 :                 return "transport ready";
    1515           0 :         case NVME_CTRLR_STATE_READY:
    1516           0 :                 return "ready";
    1517           1 :         case NVME_CTRLR_STATE_ERROR:
    1518           1 :                 return "error";
    1519           0 :         case NVME_CTRLR_STATE_DISCONNECTED:
    1520           0 :                 return "disconnected";
    1521             :         }
    1522           0 :         return "unknown";
     1523             : }
    1524             : 
    1525             : static void
    1526         732 : _nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1527             :                       uint64_t timeout_in_ms, bool quiet)
    1528             : {
    1529             :         uint64_t ticks_per_ms, timeout_in_ticks, now_ticks;
    1530             : 
    1531         732 :         ctrlr->state = state;
    1532         732 :         if (timeout_in_ms == NVME_TIMEOUT_KEEP_EXISTING) {
    1533          33 :                 if (!quiet) {
    1534           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (keeping existing timeout)\n",
    1535             :                                             nvme_ctrlr_state_string(ctrlr->state));
    1536             :                 }
    1537          33 :                 return;
    1538             :         }
    1539             : 
    1540         699 :         if (timeout_in_ms == NVME_TIMEOUT_INFINITE) {
    1541         697 :                 goto inf;
    1542             :         }
    1543             : 
    1544           2 :         ticks_per_ms = spdk_get_ticks_hz() / 1000;
    1545           2 :         if (timeout_in_ms > UINT64_MAX / ticks_per_ms) {
    1546           0 :                 NVME_CTRLR_ERRLOG(ctrlr,
    1547             :                                   "Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
    1548           0 :                 goto inf;
    1549             :         }
    1550             : 
    1551           2 :         now_ticks = spdk_get_ticks();
    1552           2 :         timeout_in_ticks = timeout_in_ms * ticks_per_ms;
    1553           2 :         if (timeout_in_ticks > UINT64_MAX - now_ticks) {
    1554           1 :                 NVME_CTRLR_ERRLOG(ctrlr,
    1555             :                                   "Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
    1556           1 :                 goto inf;
    1557             :         }
    1558             : 
    1559           1 :         ctrlr->state_timeout_tsc = timeout_in_ticks + now_ticks;
    1560           1 :         if (!quiet) {
    1561           1 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (timeout %" PRIu64 " ms)\n",
    1562             :                                     nvme_ctrlr_state_string(ctrlr->state), timeout_in_ms);
    1563             :         }
    1564           1 :         return;
    1565         698 : inf:
    1566         698 :         if (!quiet) {
    1567         698 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (no timeout)\n",
    1568             :                                     nvme_ctrlr_state_string(ctrlr->state));
    1569             :         }
    1570         698 :         ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
    1571             : }
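
The two overflow guards above protect the multiply and the add separately. Below is a standalone sketch of the same deadline computation; the helper is hypothetical and assumes a tick rate of at least 1 kHz.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns an absolute tick deadline, or UINT64_MAX for "no timeout",
     * mirroring the checks in _nvme_ctrlr_set_state() above.
     */
    static uint64_t
    deadline_ticks(uint64_t timeout_ms, uint64_t ticks_hz, uint64_t now)
    {
            uint64_t ticks_per_ms = ticks_hz / 1000;  /* assumes ticks_hz >= 1000 */
            uint64_t timeout_ticks;

            if (timeout_ms > UINT64_MAX / ticks_per_ms) {
                    return UINT64_MAX;  /* the multiply would overflow */
            }
            timeout_ticks = timeout_ms * ticks_per_ms;
            if (timeout_ticks > UINT64_MAX - now) {
                    return UINT64_MAX;  /* the add would overflow */
            }
            return now + timeout_ticks;
    }

    int
    main(void)
    {
            printf("deadline = %" PRIu64 " ticks\n",
                   deadline_ticks(5000, 2400000000ULL, 1000000ULL));
            return 0;
    }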
    1572             : 
    1573             : static void
    1574         699 : nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1575             :                      uint64_t timeout_in_ms)
    1576             : {
    1577         699 :         _nvme_ctrlr_set_state(ctrlr, state, timeout_in_ms, false);
    1578         699 : }
    1579             : 
    1580             : static void
    1581          33 : nvme_ctrlr_set_state_quiet(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1582             :                            uint64_t timeout_in_ms)
    1583             : {
    1584          33 :         _nvme_ctrlr_set_state(ctrlr, state, timeout_in_ms, true);
    1585          33 : }
    1586             : 
    1587             : static void
    1588          48 : nvme_ctrlr_free_zns_specific_data(struct spdk_nvme_ctrlr *ctrlr)
    1589             : {
    1590          48 :         spdk_free(ctrlr->cdata_zns);
    1591          48 :         ctrlr->cdata_zns = NULL;
    1592          48 : }
    1593             : 
    1594             : static void
    1595          48 : nvme_ctrlr_free_iocs_specific_data(struct spdk_nvme_ctrlr *ctrlr)
    1596             : {
    1597          48 :         nvme_ctrlr_free_zns_specific_data(ctrlr);
    1598          48 : }
    1599             : 
    1600             : static void
    1601          49 : nvme_ctrlr_free_doorbell_buffer(struct spdk_nvme_ctrlr *ctrlr)
    1602             : {
    1603          49 :         if (ctrlr->shadow_doorbell) {
    1604           1 :                 spdk_free(ctrlr->shadow_doorbell);
    1605           1 :                 ctrlr->shadow_doorbell = NULL;
    1606             :         }
    1607             : 
    1608          49 :         if (ctrlr->eventidx) {
    1609           1 :                 spdk_free(ctrlr->eventidx);
    1610           1 :                 ctrlr->eventidx = NULL;
    1611             :         }
    1612          49 : }
    1613             : 
    1614             : static void
    1615           1 : nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
    1616             : {
    1617           1 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    1618             : 
    1619           1 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1620           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "Doorbell buffer config failed\n");
    1621             :         } else {
    1622           1 :                 NVME_CTRLR_INFOLOG(ctrlr, "Doorbell buffer config enabled\n");
    1623             :         }
    1624           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1625           1 :                              ctrlr->opts.admin_timeout_ms);
    1626           1 : }
    1627             : 
    1628             : static int
    1629          15 : nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
    1630             : {
    1631          15 :         int rc = 0;
    1632          15 :         uint64_t prp1, prp2, len;
    1633             : 
    1634          15 :         if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
    1635          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1636          14 :                                      ctrlr->opts.admin_timeout_ms);
    1637          14 :                 return 0;
    1638             :         }
    1639             : 
    1640           1 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    1641           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1642           0 :                                      ctrlr->opts.admin_timeout_ms);
    1643           0 :                 return 0;
    1644             :         }
    1645             : 
     1646             :         /* Each doorbell buffer is exactly one page. */
    1647           1 :         ctrlr->shadow_doorbell = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
    1648             :                                               NULL, SPDK_ENV_LCORE_ID_ANY,
    1649             :                                               SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
    1650           1 :         if (ctrlr->shadow_doorbell == NULL) {
    1651           0 :                 rc = -ENOMEM;
    1652           0 :                 goto error;
    1653             :         }
    1654             : 
    1655           1 :         len = ctrlr->page_size;
    1656           1 :         prp1 = spdk_vtophys(ctrlr->shadow_doorbell, &len);
    1657           1 :         if (prp1 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
    1658           0 :                 rc = -EFAULT;
    1659           0 :                 goto error;
    1660             :         }
    1661             : 
    1662           1 :         ctrlr->eventidx = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
    1663             :                                        NULL, SPDK_ENV_LCORE_ID_ANY,
    1664             :                                        SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
    1665           1 :         if (ctrlr->eventidx == NULL) {
    1666           0 :                 rc = -ENOMEM;
    1667           0 :                 goto error;
    1668             :         }
    1669             : 
    1670           1 :         len = ctrlr->page_size;
    1671           1 :         prp2 = spdk_vtophys(ctrlr->eventidx, &len);
    1672           1 :         if (prp2 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
    1673           0 :                 rc = -EFAULT;
    1674           0 :                 goto error;
    1675             :         }
    1676             : 
    1677           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
    1678           1 :                              ctrlr->opts.admin_timeout_ms);
    1679             : 
    1680           1 :         rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
    1681             :                         nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
    1682           1 :         if (rc != 0) {
    1683           0 :                 goto error;
    1684             :         }
    1685             : 
    1686           1 :         return 0;
    1687             : 
    1688           0 : error:
    1689           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1690           0 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    1691           0 :         return rc;
    1692             : }
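
Both buffers above must be page-sized, page-aligned, and physically contiguous, since their physical addresses are handed to the controller as PRPs. A minimal sketch of the allocate-and-translate pattern using the same env calls follows; it assumes an already-initialized SPDK environment, and the helper name is illustrative.

    #include "spdk/env.h"

    /* Allocate one DMA-able page and return its physical address, or
     * SPDK_VTOPHYS_ERROR on failure. Mirrors the checks above.
     */
    static uint64_t
    alloc_prp_page(uint64_t page_size, void **buf)
    {
            uint64_t len = page_size;
            uint64_t phys;

            *buf = spdk_zmalloc(page_size, page_size, NULL, SPDK_ENV_LCORE_ID_ANY,
                                SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
            if (*buf == NULL) {
                    return SPDK_VTOPHYS_ERROR;
            }

            phys = spdk_vtophys(*buf, &len);
            if (phys == SPDK_VTOPHYS_ERROR || len != page_size) {
                    /* Not translatable, or not contiguous for a full page. */
                    spdk_free(*buf);
                    *buf = NULL;
                    return SPDK_VTOPHYS_ERROR;
            }
            return phys;
    }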
    1693             : 
    1694             : void
    1695          48 : nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr)
    1696             : {
    1697             :         struct nvme_request     *req, *tmp;
    1698          48 :         struct spdk_nvme_cpl    cpl = {};
    1699             : 
    1700          48 :         cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
    1701          48 :         cpl.status.sct = SPDK_NVME_SCT_GENERIC;
    1702             : 
    1703          48 :         STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) {
    1704           0 :                 STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
    1705           0 :                 ctrlr->outstanding_aborts++;
    1706             : 
    1707           0 :                 nvme_complete_request(req->cb_fn, req->cb_arg, req->qpair, req, &cpl);
    1708             :         }
    1709          48 : }
    1710             : 
    1711             : static int
    1712           2 : nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
    1713             : {
    1714           2 :         if (ctrlr->is_resetting || ctrlr->is_removed) {
    1715             :                 /*
    1716             :                  * Controller is already resetting or has been removed. Return
    1717             :                  *  immediately since there is no need to kick off another
    1718             :                  *  reset in these cases.
    1719             :                  */
    1720           1 :                 return ctrlr->is_resetting ? -EBUSY : -ENXIO;
    1721             :         }
    1722             : 
    1723           1 :         ctrlr->is_resetting = true;
    1724           1 :         ctrlr->is_failed = false;
    1725           1 :         ctrlr->is_disconnecting = true;
    1726           1 :         ctrlr->prepare_for_reset = true;
    1727             : 
    1728           1 :         NVME_CTRLR_NOTICELOG(ctrlr, "resetting controller\n");
    1729             : 
     1730             :         /* Disable keep-alive; it'll be re-enabled as part of the init process */
    1731           1 :         ctrlr->keep_alive_interval_ticks = 0;
    1732             : 
    1733             :         /* Abort all of the queued abort requests */
    1734           1 :         nvme_ctrlr_abort_queued_aborts(ctrlr);
    1735             : 
    1736           1 :         nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
    1737             : 
    1738           1 :         ctrlr->adminq->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1739           1 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    1740             : 
    1741           1 :         return 0;
    1742             : }
    1743             : 
    1744             : static void
    1745           1 : nvme_ctrlr_disconnect_done(struct spdk_nvme_ctrlr *ctrlr)
    1746             : {
    1747           1 :         assert(ctrlr->is_failed == false);
    1748           1 :         ctrlr->is_disconnecting = false;
    1749             : 
    1750             :         /* Doorbell buffer config is invalid during reset */
    1751           1 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    1752             : 
    1753             :         /* I/O Command Set Specific Identify Controller data is invalidated during reset */
    1754           1 :         nvme_ctrlr_free_iocs_specific_data(ctrlr);
    1755             : 
    1756           1 :         spdk_bit_array_free(&ctrlr->free_io_qids);
    1757             : 
    1758             :         /* Set the state back to DISCONNECTED to cause a full hardware reset. */
    1759           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISCONNECTED, NVME_TIMEOUT_INFINITE);
    1760           1 : }
    1761             : 
    1762             : int
    1763           0 : spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
    1764             : {
    1765             :         int rc;
    1766             : 
    1767           0 :         nvme_ctrlr_lock(ctrlr);
    1768           0 :         rc = nvme_ctrlr_disconnect(ctrlr);
    1769           0 :         nvme_ctrlr_unlock(ctrlr);
    1770             : 
    1771           0 :         return rc;
    1772             : }
    1773             : 
    1774             : void
    1775           1 : spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
    1776             : {
    1777           1 :         nvme_ctrlr_lock(ctrlr);
    1778             : 
    1779           1 :         ctrlr->prepare_for_reset = false;
    1780             : 
    1781             :         /* Set the state back to INIT to cause a full hardware reset. */
    1782           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    1783             : 
    1784             :         /* Return without releasing ctrlr_lock. ctrlr_lock will be released when
     1785             :          * spdk_nvme_ctrlr_reconnect_poll_async() returns 0.
    1786             :          */
    1787           1 : }
    1788             : 
    1789             : int
    1790           0 : nvme_ctrlr_reinitialize_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
    1791             : {
    1792             :         bool async;
    1793             :         int rc;
    1794             : 
    1795           0 :         if (nvme_ctrlr_get_current_process(ctrlr) != qpair->active_proc ||
    1796           0 :             spdk_nvme_ctrlr_is_fabrics(ctrlr) || nvme_qpair_is_admin_queue(qpair)) {
    1797           0 :                 assert(false);
    1798             :                 return -EINVAL;
    1799             :         }
    1800             : 
    1801             :         /* Force a synchronous connect. */
    1802           0 :         async = qpair->async;
    1803           0 :         qpair->async = false;
    1804           0 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
    1805           0 :         qpair->async = async;
    1806             : 
    1807           0 :         if (rc != 0) {
    1808           0 :                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1809             :         }
    1810             : 
    1811           0 :         return rc;
    1812             : }
    1813             : 
    1814             : /**
    1815             :  * This function will be called when the controller is being reinitialized.
    1816             :  * Note: the ctrlr_lock must be held when calling this function.
    1817             :  */
    1818             : int
    1819          25 : spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
    1820             : {
    1821             :         struct spdk_nvme_ns *ns, *tmp_ns;
    1822             :         struct spdk_nvme_qpair  *qpair;
    1823          25 :         int rc = 0, rc_tmp = 0;
    1824             : 
    1825          25 :         if (nvme_ctrlr_process_init(ctrlr) != 0) {
    1826           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "controller reinitialization failed\n");
    1827           0 :                 rc = -1;
    1828             :         }
    1829          25 :         if (ctrlr->state != NVME_CTRLR_STATE_READY && rc != -1) {
    1830          24 :                 return -EAGAIN;
    1831             :         }
    1832             : 
    1833             :         /*
    1834             :          * For non-fabrics controllers, the memory locations of the transport qpair
    1835             :          * don't change when the controller is reset. They simply need to be
    1836             :          * re-enabled with admin commands to the controller. For fabric
    1837             :          * controllers we need to disconnect and reconnect the qpair on its
    1838             :          * own thread outside of the context of the reset.
    1839             :          */
    1840           1 :         if (rc == 0 && !spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
    1841             :                 /* Reinitialize qpairs */
    1842           1 :                 TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
    1843             :                         /* Always clear the qid bit here, even for a foreign qpair. We need
    1844             :                          * to make sure another process doesn't get the chance to grab that
    1845             :                          * qid.
    1846             :                          */
    1847           0 :                         assert(spdk_bit_array_get(ctrlr->free_io_qids, qpair->id));
    1848           0 :                         spdk_bit_array_clear(ctrlr->free_io_qids, qpair->id);
    1849           0 :                         if (nvme_ctrlr_get_current_process(ctrlr) != qpair->active_proc) {
    1850             :                                 /*
    1851             :                                  * We cannot reinitialize a foreign qpair. The qpair's owning
    1852             :                                  * process will take care of it. Set failure reason to FAILURE_RESET
    1853             :                                  * to ensure that happens.
    1854             :                                  */
    1855           0 :                                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_RESET;
    1856           0 :                                 continue;
    1857             :                         }
    1858           0 :                         rc_tmp = nvme_ctrlr_reinitialize_io_qpair(ctrlr, qpair);
    1859           0 :                         if (rc_tmp != 0) {
    1860           0 :                                 rc = rc_tmp;
    1861             :                         }
    1862             :                 }
    1863             :         }
    1864             : 
    1865             :         /*
    1866             :          * Take this opportunity to remove inactive namespaces. During a reset namespace
    1867             :          * handles can be invalidated.
    1868             :          */
    1869           5 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    1870           4 :                 if (!ns->active) {
    1871           1 :                         RB_REMOVE(nvme_ns_tree, &ctrlr->ns, ns);
    1872           1 :                         spdk_free(ns);
    1873             :                 }
    1874             :         }
    1875             : 
    1876           1 :         if (rc) {
    1877           0 :                 nvme_ctrlr_fail(ctrlr, false);
    1878             :         }
    1879           1 :         ctrlr->is_resetting = false;
    1880             : 
    1881           1 :         nvme_ctrlr_unlock(ctrlr);
    1882             : 
    1883           1 :         if (!ctrlr->cdata.oaes.ns_attribute_notices) {
    1884             :                 /*
     1885             :                  * If the controller doesn't support ns_attribute_notices and the
     1886             :                  * namespace attributes change (e.g. the number of namespaces),
     1887             :                  * we need to update the system while handling the device reset.
    1888             :                  */
    1889           1 :                 nvme_io_msg_ctrlr_update(ctrlr);
    1890             :         }
    1891             : 
    1892           1 :         return rc;
    1893             : }
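
For callers that cannot block in spdk_nvme_ctrlr_reset() (defined below), the same sequence can be driven asynchronously: disconnect, drain the admin queue, start the reconnect, and poll it from the owning thread. A minimal usage sketch, with error handling elided and the wrapper name hypothetical:

    #include "spdk/nvme.h"
    #include <errno.h>

    /* Sketch: asynchronous controller reset driven by the caller. */
    static int
    app_reset_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
    {
            int rc;

            rc = spdk_nvme_ctrlr_disconnect(ctrlr);
            if (rc != 0) {
                    return rc;  /* already resetting or removed */
            }

            /* Drain until the admin qpair reports it is disconnected. */
            while (spdk_nvme_ctrlr_process_admin_completions(ctrlr) != -ENXIO) {
                    ;
            }

            spdk_nvme_ctrlr_reconnect_async(ctrlr);

            /* Poll from this thread; other work could be interleaved here. */
            while ((rc = spdk_nvme_ctrlr_reconnect_poll_async(ctrlr)) == -EAGAIN) {
                    ;
            }
            return rc;
    }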
    1894             : 
    1895             : /*
     1896             :  * For the PCIe transport, spdk_nvme_ctrlr_disconnect() will do a Controller Level Reset
     1897             :  * (change CC.EN from 1 to 0) as an operation to disconnect the admin qpair.
    1898             :  * The following two functions are added to do a Controller Level Reset. They have
    1899             :  * to be called under the nvme controller's lock.
    1900             :  */
    1901             : void
    1902           1 : nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr)
    1903             : {
    1904           1 :         assert(ctrlr->is_disconnecting == true);
    1905             : 
    1906           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN, NVME_TIMEOUT_INFINITE);
    1907           1 : }
    1908             : 
    1909             : int
    1910           2 : nvme_ctrlr_disable_poll(struct spdk_nvme_ctrlr *ctrlr)
    1911             : {
    1912           2 :         int rc = 0;
    1913             : 
    1914           2 :         if (nvme_ctrlr_process_init(ctrlr) != 0) {
    1915           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "failed to disable controller\n");
    1916           0 :                 rc = -1;
    1917             :         }
    1918             : 
    1919           2 :         if (ctrlr->state != NVME_CTRLR_STATE_DISABLED && rc != -1) {
    1920           1 :                 return -EAGAIN;
    1921             :         }
    1922             : 
    1923           1 :         return rc;
    1924             : }
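A usage sketch (not part of this file): a disconnect path would drive these two helpers roughly as below. example_disable_ctrlr is a hypothetical caller, shown only to illustrate the -EAGAIN polling contract; it assumes the controller lock is held and ctrlr->is_disconnecting has been set, as nvme_ctrlr_disable() asserts.

        /*
         * Hypothetical caller, for illustration only. Assumes the controller
         * lock is held and ctrlr->is_disconnecting has been set.
         */
        static void
        example_disable_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
        {
                int rc;

                nvme_ctrlr_disable(ctrlr);
                do {
                        rc = nvme_ctrlr_disable_poll(ctrlr);
                } while (rc == -EAGAIN);

                if (rc != 0) {
                        /* CC.EN could not be cleared; the controller is failed. */
                }
        }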
    1925             : 
    1926             : static void
    1927           1 : nvme_ctrlr_fail_io_qpairs(struct spdk_nvme_ctrlr *ctrlr)
    1928             : {
    1929             :         struct spdk_nvme_qpair  *qpair;
    1930             : 
    1931           1 :         TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
    1932           0 :                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1933             :         }
    1934           1 : }
    1935             : 
    1936             : int
    1937           2 : spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
    1938             : {
    1939             :         int rc;
    1940             : 
    1941           2 :         nvme_ctrlr_lock(ctrlr);
    1942             : 
    1943           2 :         rc = nvme_ctrlr_disconnect(ctrlr);
    1944           2 :         if (rc == 0) {
    1945           1 :                 nvme_ctrlr_fail_io_qpairs(ctrlr);
    1946             :         }
    1947             : 
    1948           2 :         nvme_ctrlr_unlock(ctrlr);
    1949             : 
    1950           2 :         if (rc != 0) {
    1951           1 :                 if (rc == -EBUSY) {
    1952           1 :                         rc = 0;
    1953             :                 }
    1954           1 :                 return rc;
    1955             :         }
    1956             : 
    1957             :         while (1) {
    1958           1 :                 rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr);
    1959           1 :                 if (rc == -ENXIO) {
    1960           1 :                         break;
    1961             :                 }
    1962             :         }
    1963             : 
    1964           1 :         spdk_nvme_ctrlr_reconnect_async(ctrlr);
    1965             : 
    1966             :         while (true) {
    1967          25 :                 rc = spdk_nvme_ctrlr_reconnect_poll_async(ctrlr);
    1968          25 :                 if (rc != -EAGAIN) {
    1969           1 :                         break;
    1970             :                 }
    1971             :         }
    1972             : 
    1973           1 :         return rc;
    1974             : }
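spdk_nvme_ctrlr_reset() blocks in the two polling loops above. A reactor-friendly sketch of the same sequence, using only the public calls this function itself composes (example_* names are hypothetical; error handling elided, and a real caller would bound the drain loop):

        /* Sketch: begin a non-blocking reset. */
        static int
        example_reset_begin(struct spdk_nvme_ctrlr *ctrlr)
        {
                int rc;

                rc = spdk_nvme_ctrlr_disconnect(ctrlr);
                if (rc != 0) {
                        /* -EBUSY means a reset is already in progress. */
                        return rc == -EBUSY ? 0 : rc;
                }

                /* Drain aborted admin completions; -ENXIO marks the qpair as disconnected. */
                while (spdk_nvme_ctrlr_process_admin_completions(ctrlr) != -ENXIO) {
                }

                spdk_nvme_ctrlr_reconnect_async(ctrlr);
                return 0;
        }

        /* Call from a poller until it returns something other than -EAGAIN. */
        static int
        example_reset_poll(struct spdk_nvme_ctrlr *ctrlr)
        {
                return spdk_nvme_ctrlr_reconnect_poll_async(ctrlr);
        }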
    1975             : 
    1976             : int
    1977           0 : spdk_nvme_ctrlr_reset_subsystem(struct spdk_nvme_ctrlr *ctrlr)
    1978             : {
    1979             :         union spdk_nvme_cap_register cap;
    1980           0 :         int rc = 0;
    1981             : 
    1982           0 :         cap = spdk_nvme_ctrlr_get_regs_cap(ctrlr);
    1983           0 :         if (cap.bits.nssrs == 0) {
    1984           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "subsystem reset is not supported\n");
    1985           0 :                 return -ENOTSUP;
    1986             :         }
    1987             : 
    1988           0 :         NVME_CTRLR_NOTICELOG(ctrlr, "resetting subsystem\n");
    1989           0 :         nvme_ctrlr_lock(ctrlr);
    1990           0 :         ctrlr->is_resetting = true;
    1991           0 :         rc = nvme_ctrlr_set_nssr(ctrlr, SPDK_NVME_NSSR_VALUE);
    1992           0 :         ctrlr->is_resetting = false;
    1993             : 
    1994           0 :         nvme_ctrlr_unlock(ctrlr);
    1995             :         /*
    1996             :          * No more cleanup is done at this point, unlike in a ctrlr reset. A subsystem reset causes
    1997             :          * a hot remove for the PCIe transport, and the hot remove handling does all the necessary ctrlr cleanup.
    1998             :          */
    1999           0 :         return rc;
    2000             : }
    2001             : 
    2002             : int
    2003           4 : spdk_nvme_ctrlr_set_trid(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_transport_id *trid)
    2004             : {
    2005           4 :         int rc = 0;
    2006             : 
    2007           4 :         nvme_ctrlr_lock(ctrlr);
    2008             : 
    2009           4 :         if (ctrlr->is_failed == false) {
    2010           1 :                 rc = -EPERM;
    2011           1 :                 goto out;
    2012             :         }
    2013             : 
    2014           3 :         if (trid->trtype != ctrlr->trid.trtype) {
    2015           1 :                 rc = -EINVAL;
    2016           1 :                 goto out;
    2017             :         }
    2018             : 
    2019           2 :         if (strncmp(trid->subnqn, ctrlr->trid.subnqn, SPDK_NVMF_NQN_MAX_LEN)) {
    2020           1 :                 rc = -EINVAL;
    2021           1 :                 goto out;
    2022             :         }
    2023             : 
    2024           1 :         ctrlr->trid = *trid;
    2025             : 
    2026           4 : out:
    2027           4 :         nvme_ctrlr_unlock(ctrlr);
    2028           4 :         return rc;
    2029             : }
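A hedged failover sketch: after a controller has failed, point it at an alternate path and reconnect. example_failover, the TCP transport, port 4420, and the subnqn string are placeholders; the new path must keep the original trtype and subnqn or the call returns -EINVAL, and it returns -EPERM unless the controller is already failed.

        static int
        example_failover(struct spdk_nvme_ctrlr *ctrlr, const char *new_traddr)
        {
                struct spdk_nvme_transport_id trid = {};
                int rc;

                spdk_nvme_trid_populate_transport(&trid, SPDK_NVME_TRANSPORT_TCP);
                trid.adrfam = SPDK_NVME_ADRFAM_IPV4;
                snprintf(trid.traddr, sizeof(trid.traddr), "%s", new_traddr);
                snprintf(trid.trsvcid, sizeof(trid.trsvcid), "%s", "4420");
                snprintf(trid.subnqn, sizeof(trid.subnqn), "%s",
                         "nqn.2016-06.io.spdk:placeholder");

                rc = spdk_nvme_ctrlr_set_trid(ctrlr, &trid);
                if (rc != 0) {
                        return rc;
                }

                spdk_nvme_ctrlr_reconnect_async(ctrlr);
                return 0;
        }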
    2030             : 
    2031             : void
    2032           0 : spdk_nvme_ctrlr_set_remove_cb(struct spdk_nvme_ctrlr *ctrlr,
    2033             :                               spdk_nvme_remove_cb remove_cb, void *remove_ctx)
    2034             : {
    2035           0 :         if (!spdk_process_is_primary()) {
    2036           0 :                 return;
    2037             :         }
    2038             : 
    2039           0 :         nvme_ctrlr_lock(ctrlr);
    2040           0 :         ctrlr->remove_cb = remove_cb;
    2041           0 :         ctrlr->cb_ctx = remove_ctx;
    2042           0 :         nvme_ctrlr_unlock(ctrlr);
    2043             : }
    2044             : 
    2045             : int
    2046           0 : spdk_nvme_ctrlr_set_keys(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ctrlr_key_opts *opts)
    2047             : {
    2048           0 :         nvme_ctrlr_lock(ctrlr);
    2049           0 :         if (SPDK_GET_FIELD(opts, dhchap_key, ctrlr->opts.dhchap_key) == NULL &&
    2050           0 :             SPDK_GET_FIELD(opts, dhchap_ctrlr_key, ctrlr->opts.dhchap_ctrlr_key) != NULL) {
    2051           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "DH-HMAC-CHAP controller key requires host key to be set\n");
    2052           0 :                 nvme_ctrlr_unlock(ctrlr);
    2053           0 :                 return -EINVAL;
    2054             :         }
    2055             : 
    2056           0 :         ctrlr->opts.dhchap_key =
    2057           0 :                 SPDK_GET_FIELD(opts, dhchap_key, ctrlr->opts.dhchap_key);
    2058           0 :         ctrlr->opts.dhchap_ctrlr_key =
    2059           0 :                 SPDK_GET_FIELD(opts, dhchap_ctrlr_key, ctrlr->opts.dhchap_ctrlr_key);
    2060           0 :         nvme_ctrlr_unlock(ctrlr);
    2061             : 
    2062           0 :         return 0;
    2063             : }
    2064             : 
    2065             : static void
    2066          16 : nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2067             : {
    2068          16 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2069             : 
    2070          16 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2071           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_identify_controller failed!\n");
    2072           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2073           0 :                 return;
    2074             :         }
    2075             : 
    2076             :         /*
    2077             :          * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
    2078             :          *  controller supports.
    2079             :          */
    2080          16 :         ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
    2081          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
    2082          16 :         if (ctrlr->cdata.mdts > 0) {
    2083           0 :                 ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
    2084             :                                                 ctrlr->min_page_size * (1 << ctrlr->cdata.mdts));
    2085           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
    2086             :         }
    2087             : 
    2088          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
    2089          16 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    2090           1 :                 ctrlr->cntlid = ctrlr->cdata.cntlid;
    2091             :         } else {
    2092             :                 /*
    2093             :                  * Fabrics controllers should already have CNTLID from the Connect command.
    2094             :                  *
    2095             :                  * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
    2096             :                  * trust the one from Connect.
    2097             :                  */
    2098          15 :                 if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
    2099           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
    2100             :                                             ctrlr->cdata.cntlid, ctrlr->cntlid);
    2101             :                 }
    2102             :         }
    2103             : 
    2104          16 :         if (ctrlr->cdata.sgls.supported && !(ctrlr->quirks & NVME_QUIRK_NOT_USE_SGL)) {
    2105           0 :                 assert(ctrlr->cdata.sgls.supported != 0x3);
    2106           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
    2107           0 :                 if (ctrlr->cdata.sgls.supported == 0x2) {
    2108           0 :                         ctrlr->flags |= SPDK_NVME_CTRLR_SGL_REQUIRES_DWORD_ALIGNMENT;
    2109             :                 }
    2110             : 
    2111           0 :                 ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
    2112           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "transport max_sges %u\n", ctrlr->max_sges);
    2113             :         }
    2114             : 
    2115          16 :         if (ctrlr->cdata.sgls.metadata_address && !(ctrlr->quirks & NVME_QUIRK_NOT_USE_SGL)) {
    2116           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_MPTR_SGL_SUPPORTED;
    2117             :         }
    2118             : 
    2119          16 :         if (ctrlr->cdata.oacs.security && !(ctrlr->quirks & NVME_QUIRK_OACS_SECURITY)) {
    2120           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_SECURITY_SEND_RECV_SUPPORTED;
    2121             :         }
    2122             : 
    2123          16 :         if (ctrlr->cdata.oacs.directives) {
    2124           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_DIRECTIVES_SUPPORTED;
    2125             :         }
    2126             : 
    2127          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "fuses compare and write: %d\n",
    2128             :                             ctrlr->cdata.fuses.compare_and_write);
    2129          16 :         if (ctrlr->cdata.fuses.compare_and_write) {
    2130           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_COMPARE_AND_WRITE_SUPPORTED;
    2131             :         }
    2132             : 
    2133          16 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
    2134          16 :                              ctrlr->opts.admin_timeout_ms);
    2135             : }
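A worked example of the MDTS clamp above, with assumed values: min_page_size = 4096 and cdata.mdts = 5 give a 128 KiB device limit, and max_xfer_size becomes the smaller of that and the transport limit. An MDTS of 0 means the controller reports no limit, which is why the clamp only runs when cdata.mdts > 0.

        uint32_t min_page_size = 4096;                         /* assumed */
        uint8_t  mdts          = 5;                            /* assumed */
        uint32_t mdts_bytes    = min_page_size * (1u << mdts); /* 131072 (128 KiB) */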
    2136             : 
    2137             : static int
    2138          16 : nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
    2139             : {
    2140             :         int     rc;
    2141             : 
    2142          16 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
    2143          16 :                              ctrlr->opts.admin_timeout_ms);
    2144             : 
    2145          16 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0, 0,
    2146          16 :                                      &ctrlr->cdata, sizeof(ctrlr->cdata),
    2147             :                                      nvme_ctrlr_identify_done, ctrlr);
    2148          16 :         if (rc != 0) {
    2149           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2150           0 :                 return rc;
    2151             :         }
    2152             : 
    2153          16 :         return 0;
    2154             : }
    2155             : 
    2156             : static void
    2157           0 : nvme_ctrlr_get_zns_cmd_and_effects_log_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2158             : {
    2159             :         struct spdk_nvme_cmds_and_effect_log_page *log_page;
    2160           0 :         struct spdk_nvme_ctrlr *ctrlr = arg;
    2161             : 
    2162           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2163           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_get_zns_cmd_and_effects_log failed!\n");
    2164           0 :                 spdk_free(ctrlr->tmp_ptr);
    2165           0 :                 ctrlr->tmp_ptr = NULL;
    2166           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2167           0 :                 return;
    2168             :         }
    2169             : 
    2170           0 :         log_page = ctrlr->tmp_ptr;
    2171             : 
    2172           0 :         if (log_page->io_cmds_supported[SPDK_NVME_OPC_ZONE_APPEND].csupp) {
    2173           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
    2174             :         }
    2175           0 :         spdk_free(ctrlr->tmp_ptr);
    2176           0 :         ctrlr->tmp_ptr = NULL;
    2177             : 
    2178           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES, ctrlr->opts.admin_timeout_ms);
    2179             : }
    2180             : 
    2181             : static int
    2182           0 : nvme_ctrlr_get_zns_cmd_and_effects_log(struct spdk_nvme_ctrlr *ctrlr)
    2183             : {
    2184             :         int rc;
    2185             : 
    2186           0 :         assert(!ctrlr->tmp_ptr);
    2187           0 :         ctrlr->tmp_ptr = spdk_zmalloc(sizeof(struct spdk_nvme_cmds_and_effect_log_page), 64, NULL,
    2188             :                                       SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
    2189           0 :         if (!ctrlr->tmp_ptr) {
    2190           0 :                 rc = -ENOMEM;
    2191           0 :                 goto error;
    2192             :         }
    2193             : 
    2194           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,
    2195           0 :                              ctrlr->opts.admin_timeout_ms);
    2196             : 
    2197           0 :         rc = spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, SPDK_NVME_LOG_COMMAND_EFFECTS_LOG,
    2198             :                         0, ctrlr->tmp_ptr, sizeof(struct spdk_nvme_cmds_and_effect_log_page),
    2199             :                         0, 0, 0, SPDK_NVME_CSI_ZNS << 24,
    2200             :                         nvme_ctrlr_get_zns_cmd_and_effects_log_done, ctrlr);
    2201           0 :         if (rc != 0) {
    2202           0 :                 goto error;
    2203             :         }
    2204             : 
    2205           0 :         return 0;
    2206             : 
    2207           0 : error:
    2208           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2209           0 :         spdk_free(ctrlr->tmp_ptr);
    2210           0 :         ctrlr->tmp_ptr = NULL;
    2211           0 :         return rc;
    2212             : }
    2213             : 
    2214             : static void
    2215           0 : nvme_ctrlr_identify_zns_specific_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2216             : {
    2217           0 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2218             : 
    2219           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2220             :                 /* no need to print an error, the controller simply does not support ZNS */
    2221           0 :                 nvme_ctrlr_free_zns_specific_data(ctrlr);
    2222           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
    2223           0 :                                      ctrlr->opts.admin_timeout_ms);
    2224           0 :                 return;
    2225             :         }
    2226             : 
    2227             :         /* A ZASL value of zero means fall back to MDTS */
    2228           0 :         if (ctrlr->cdata_zns->zasl) {
    2229           0 :                 uint32_t max_append = ctrlr->min_page_size * (1 << ctrlr->cdata_zns->zasl);
    2230           0 :                 ctrlr->max_zone_append_size = spdk_min(ctrlr->max_xfer_size, max_append);
    2231             :         } else {
    2232           0 :                 ctrlr->max_zone_append_size = ctrlr->max_xfer_size;
    2233             :         }
    2234             : 
    2235           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,
    2236           0 :                              ctrlr->opts.admin_timeout_ms);
    2237             : }
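The ZASL math mirrors MDTS. With assumed values min_page_size = 4096 and zasl = 3, the append limit works out to 32 KiB, further clamped by max_xfer_size; a ZASL of 0 falls back to max_xfer_size, i.e. the MDTS-derived limit.

        uint32_t zasl_bytes = 4096 * (1u << 3);   /* assumed values -> 32768 (32 KiB) */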
    2238             : 
    2239             : /**
    2240             :  * This function will try to fetch the I/O Command Specific Controller data structure for
    2241             :  * each I/O Command Set supported by SPDK.
    2242             :  *
    2243             :  * If an I/O Command Set is not supported by the controller, "Invalid Field in Command"
    2244             :  * will be returned. Since we are fetching exploratively, getting an error back
    2245             :  * from the controller should not be treated as fatal.
    2246             :  *
    2247             :  * I/O Command Sets not supported by SPDK will be skipped (e.g. Key Value Command Set).
    2248             :  *
    2249             :  * I/O Command Sets without an IOCS-specific data structure (i.e. a zero-filled IOCS-specific
    2250             :  * data structure) will be skipped (e.g. NVM Command Set, Key Value Command Set).
    2251             :  */
    2252             : static int
    2253          19 : nvme_ctrlr_identify_iocs_specific(struct spdk_nvme_ctrlr *ctrlr)
    2254             : {
    2255             :         int     rc;
    2256             : 
    2257          19 :         if (!nvme_ctrlr_multi_iocs_enabled(ctrlr)) {
    2258          19 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
    2259          19 :                                      ctrlr->opts.admin_timeout_ms);
    2260          19 :                 return 0;
    2261             :         }
    2262             : 
    2263             :         /*
    2264             :          * Since SPDK currently only needs to fetch a single Command Set, keep the code here,
    2265             :          * instead of creating multiple NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC substates,
    2266             :          * which would require additional functions and complexity for no good reason.
    2267             :          */
    2268           0 :         assert(!ctrlr->cdata_zns);
    2269           0 :         ctrlr->cdata_zns = spdk_zmalloc(sizeof(*ctrlr->cdata_zns), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
    2270             :                                         SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
    2271           0 :         if (!ctrlr->cdata_zns) {
    2272           0 :                 rc = -ENOMEM;
    2273           0 :                 goto error;
    2274             :         }
    2275             : 
    2276           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,
    2277           0 :                              ctrlr->opts.admin_timeout_ms);
    2278             : 
    2279           0 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR_IOCS, 0, 0, SPDK_NVME_CSI_ZNS,
    2280           0 :                                      ctrlr->cdata_zns, sizeof(*ctrlr->cdata_zns),
    2281             :                                      nvme_ctrlr_identify_zns_specific_done, ctrlr);
    2282           0 :         if (rc != 0) {
    2283           0 :                 goto error;
    2284             :         }
    2285             : 
    2286           0 :         return 0;
    2287             : 
    2288           0 : error:
    2289           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2290           0 :         nvme_ctrlr_free_zns_specific_data(ctrlr);
    2291           0 :         return rc;
    2292             : }
    2293             : 
    2294             : enum nvme_active_ns_state {
    2295             :         NVME_ACTIVE_NS_STATE_IDLE,
    2296             :         NVME_ACTIVE_NS_STATE_PROCESSING,
    2297             :         NVME_ACTIVE_NS_STATE_DONE,
    2298             :         NVME_ACTIVE_NS_STATE_ERROR
    2299             : };
    2300             : 
    2301             : typedef void (*nvme_active_ns_ctx_deleter)(struct nvme_active_ns_ctx *);
    2302             : 
    2303             : struct nvme_active_ns_ctx {
    2304             :         struct spdk_nvme_ctrlr *ctrlr;
    2305             :         uint32_t page_count;
    2306             :         uint32_t next_nsid;
    2307             :         uint32_t *new_ns_list;
    2308             :         nvme_active_ns_ctx_deleter deleter;
    2309             : 
    2310             :         enum nvme_active_ns_state state;
    2311             : };
    2312             : 
    2313             : static struct nvme_active_ns_ctx *
    2314          45 : nvme_active_ns_ctx_create(struct spdk_nvme_ctrlr *ctrlr, nvme_active_ns_ctx_deleter deleter)
    2315             : {
    2316             :         struct nvme_active_ns_ctx *ctx;
    2317          45 :         uint32_t *new_ns_list = NULL;
    2318             : 
    2319          45 :         ctx = calloc(1, sizeof(*ctx));
    2320          45 :         if (!ctx) {
    2321           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate nvme_active_ns_ctx!\n");
    2322           0 :                 return NULL;
    2323             :         }
    2324             : 
    2325          45 :         new_ns_list = spdk_zmalloc(sizeof(struct spdk_nvme_ns_list), ctrlr->page_size,
    2326             :                                    NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
    2327          45 :         if (!new_ns_list) {
    2328           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate active_ns_list!\n");
    2329           0 :                 free(ctx);
    2330           0 :                 return NULL;
    2331             :         }
    2332             : 
    2333          45 :         ctx->page_count = 1;
    2334          45 :         ctx->new_ns_list = new_ns_list;
    2335          45 :         ctx->ctrlr = ctrlr;
    2336          45 :         ctx->deleter = deleter;
    2337             : 
    2338          45 :         return ctx;
    2339             : }
    2340             : 
    2341             : static void
    2342          45 : nvme_active_ns_ctx_destroy(struct nvme_active_ns_ctx *ctx)
    2343             : {
    2344          45 :         spdk_free(ctx->new_ns_list);
    2345          45 :         free(ctx);
    2346          45 : }
    2347             : 
    2348             : static int
    2349       18403 : nvme_ctrlr_destruct_namespace(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    2350             : {
    2351       18403 :         struct spdk_nvme_ns tmp, *ns;
    2352             : 
    2353       18403 :         assert(ctrlr != NULL);
    2354             : 
    2355       18403 :         tmp.id = nsid;
    2356       18403 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    2357       18403 :         if (ns == NULL) {
    2358           0 :                 return -EINVAL;
    2359             :         }
    2360             : 
    2361       18403 :         nvme_ns_destruct(ns);
    2362       18403 :         ns->active = false;
    2363             : 
    2364       18403 :         return 0;
    2365             : }
    2366             : 
    2367             : static int
    2368       12311 : nvme_ctrlr_construct_namespace(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    2369             : {
    2370             :         struct spdk_nvme_ns *ns;
    2371             : 
    2372       12311 :         if (nsid < 1 || nsid > ctrlr->cdata.nn) {
    2373           0 :                 return -EINVAL;
    2374             :         }
    2375             : 
    2376             :         /* Namespaces are constructed on demand, so simply requesting the namespace creates it. */
    2377       12311 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2378       12311 :         if (ns == NULL) {
    2379           0 :                 return -ENOMEM;
    2380             :         }
    2381             : 
    2382       12311 :         ns->active = true;
    2383             : 
    2384       12311 :         return 0;
    2385             : }
    2386             : 
    2387             : static void
    2388          44 : nvme_ctrlr_identify_active_ns_swap(struct spdk_nvme_ctrlr *ctrlr, uint32_t *new_ns_list,
    2389             :                                    size_t max_entries)
    2390             : {
    2391          44 :         uint32_t active_ns_count = 0;
    2392             :         size_t i;
    2393             :         uint32_t nsid;
    2394             :         struct spdk_nvme_ns *ns, *tmp_ns;
    2395             :         int rc;
    2396             : 
    2397             :         /* First, remove namespaces that no longer exist */
    2398       15387 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    2399       15343 :                 nsid = new_ns_list[0];
    2400       15343 :                 active_ns_count = 0;
    2401     3547429 :                 while (nsid != 0) {
    2402     3536712 :                         if (nsid == ns->id) {
    2403        4626 :                                 break;
    2404             :                         }
    2405             : 
    2406     3532086 :                         nsid = new_ns_list[active_ns_count++];
    2407             :                 }
    2408             : 
    2409       15343 :                 if (nsid != ns->id) {
    2410             :                         /* Did not find this namespace id in the new list. */
    2411       10717 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Namespace %u was removed\n", ns->id);
    2412       10717 :                         nvme_ctrlr_destruct_namespace(ctrlr, ns->id);
    2413             :                 }
    2414             :         }
    2415             : 
    2416             :         /* Next, add new namespaces */
    2417          44 :         active_ns_count = 0;
    2418       12355 :         for (i = 0; i < max_entries; i++) {
    2419       12355 :                 nsid = new_ns_list[active_ns_count];
    2420             : 
    2421       12355 :                 if (nsid == 0) {
    2422          44 :                         break;
    2423             :                 }
    2424             : 
    2425             :                 /* If the namespace already exists, this will not construct it a second time. */
    2426       12311 :                 rc = nvme_ctrlr_construct_namespace(ctrlr, nsid);
    2427       12311 :                 if (rc != 0) {
    2428             :                         /* We can't easily handle a failure here, so just move on. */
    2429           0 :                         assert(false);
    2430             :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to allocate a namespace object.\n");
    2431             :                         continue;
    2432             :                 }
    2433             : 
    2434       12311 :                 active_ns_count++;
    2435             :         }
    2436             : 
    2437          44 :         ctrlr->active_ns_count = active_ns_count;
    2438          44 : }
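A concrete trace of the two-phase reconcile above, with invented values:

        /*
         * Invented example: the tree holds namespaces {1, 2, 3} and the
         * controller returns the zero-terminated list {2, 4, 0}.
         *
         * Phase 1 (remove): for each tree node, scan the list. 1 and 3 are
         * not found, so they are destructed and marked inactive; 2 is kept.
         * Phase 2 (add): walk the list until the 0 terminator. 2 already
         * exists (construct is a no-op), 4 is newly constructed.
         * Result: active namespaces {2, 4}, active_ns_count == 2.
         */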
    2439             : 
    2440             : static void
    2441          30 : nvme_ctrlr_identify_active_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2442             : {
    2443          30 :         struct nvme_active_ns_ctx *ctx = arg;
    2444          30 :         uint32_t *new_ns_list = NULL;
    2445             : 
    2446          30 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2447           1 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2448           1 :                 goto out;
    2449             :         }
    2450             : 
    2451          29 :         ctx->next_nsid = ctx->new_ns_list[1024 * ctx->page_count - 1];
    2452          29 :         if (ctx->next_nsid == 0) {
    2453          24 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2454          24 :                 goto out;
    2455             :         }
    2456             : 
    2457           5 :         ctx->page_count++;
    2458           5 :         new_ns_list = spdk_realloc(ctx->new_ns_list,
    2459           5 :                                    ctx->page_count * sizeof(struct spdk_nvme_ns_list),
    2460           5 :                                    ctx->ctrlr->page_size);
    2461           5 :         if (!new_ns_list) {
    2462           0 :                 SPDK_ERRLOG("Failed to reallocate active_ns_list!\n");
    2463           0 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2464           0 :                 goto out;
    2465             :         }
    2466             : 
    2467           5 :         ctx->new_ns_list = new_ns_list;
    2468           5 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2469           5 :         return;
    2470             : 
    2471          25 : out:
    2472          25 :         if (ctx->deleter) {
    2473           9 :                 ctx->deleter(ctx);
    2474             :         }
    2475             : }
    2476             : 
    2477             : static void
    2478          50 : nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx)
    2479             : {
    2480          50 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    2481             :         uint32_t i;
    2482             :         int rc;
    2483             : 
    2484          50 :         if (ctrlr->cdata.nn == 0) {
    2485          16 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2486          16 :                 goto out;
    2487             :         }
    2488             : 
    2489          34 :         assert(ctx->new_ns_list != NULL);
    2490             : 
    2491             :         /*
    2492             :          * If the controller doesn't support the Active Namespace ID list (CNS 0x02),
    2493             :          * dummy up an active ns list, i.e. all namespaces report as active.
    2494             :          */
    2495          34 :         if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 1, 0) || ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS) {
    2496             :                 uint32_t *new_ns_list;
    2497             : 
    2498             :                 /*
    2499             :                  * The active NS list must always end with a zero element,
    2500             :                  * so we allocate room for cdata.nn + 1 entries.
    2501             :                  */
    2502           4 :                 ctx->page_count = spdk_divide_round_up(ctrlr->cdata.nn + 1,
    2503             :                                                        sizeof(struct spdk_nvme_ns_list) / sizeof(new_ns_list[0]));
    2504           4 :                 new_ns_list = spdk_realloc(ctx->new_ns_list,
    2505           4 :                                            ctx->page_count * sizeof(struct spdk_nvme_ns_list),
    2506           4 :                                            ctx->ctrlr->page_size);
    2507           4 :                 if (!new_ns_list) {
    2508           0 :                         SPDK_ERRLOG("Failed to reallocate active_ns_list!\n");
    2509           0 :                         ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2510           0 :                         goto out;
    2511             :                 }
    2512             : 
    2513           4 :                 ctx->new_ns_list = new_ns_list;
    2514           4 :                 ctx->new_ns_list[ctrlr->cdata.nn] = 0;
    2515        4091 :                 for (i = 0; i < ctrlr->cdata.nn; i++) {
    2516        4087 :                         ctx->new_ns_list[i] = i + 1;
    2517             :                 }
    2518             : 
    2519           4 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2520           4 :                 goto out;
    2521             :         }
    2522             : 
    2523          30 :         ctx->state = NVME_ACTIVE_NS_STATE_PROCESSING;
    2524          30 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, ctx->next_nsid, 0,
    2525          30 :                                      &ctx->new_ns_list[1024 * (ctx->page_count - 1)], sizeof(struct spdk_nvme_ns_list),
    2526             :                                      nvme_ctrlr_identify_active_ns_async_done, ctx);
    2527          30 :         if (rc != 0) {
    2528           0 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2529           0 :                 goto out;
    2530             :         }
    2531             : 
    2532          30 :         return;
    2533             : 
    2534          20 : out:
    2535          20 :         if (ctx->deleter) {
    2536          15 :                 ctx->deleter(ctx);
    2537             :         }
    2538             : }
    2539             : 
    2540             : static void
    2541          24 : _nvme_active_ns_ctx_deleter(struct nvme_active_ns_ctx *ctx)
    2542             : {
    2543          24 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    2544             :         struct spdk_nvme_ns *ns;
    2545             : 
    2546          24 :         if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
    2547           0 :                 nvme_active_ns_ctx_destroy(ctx);
    2548           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2549           0 :                 return;
    2550             :         }
    2551             : 
    2552          24 :         assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
    2553             : 
    2554          28 :         RB_FOREACH(ns, nvme_ns_tree, &ctrlr->ns) {
    2555           4 :                 nvme_ns_free_iocs_specific_data(ns);
    2556             :         }
    2557             : 
    2558          24 :         nvme_ctrlr_identify_active_ns_swap(ctrlr, ctx->new_ns_list, ctx->page_count * 1024);
    2559          24 :         nvme_active_ns_ctx_destroy(ctx);
    2560          24 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS, ctrlr->opts.admin_timeout_ms);
    2561             : }
    2562             : 
    2563             : static void
    2564          24 : _nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    2565             : {
    2566             :         struct nvme_active_ns_ctx *ctx;
    2567             : 
    2568          24 :         ctx = nvme_active_ns_ctx_create(ctrlr, _nvme_active_ns_ctx_deleter);
    2569          24 :         if (!ctx) {
    2570           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2571           0 :                 return;
    2572             :         }
    2573             : 
    2574          24 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
    2575          24 :                              ctrlr->opts.admin_timeout_ms);
    2576          24 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2577             : }
    2578             : 
    2579             : int
    2580          21 : nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    2581             : {
    2582             :         struct nvme_active_ns_ctx *ctx;
    2583             :         int rc;
    2584             : 
    2585          21 :         ctx = nvme_active_ns_ctx_create(ctrlr, NULL);
    2586          21 :         if (!ctx) {
    2587           0 :                 return -ENOMEM;
    2588             :         }
    2589             : 
    2590          21 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2591          21 :         while (ctx->state == NVME_ACTIVE_NS_STATE_PROCESSING) {
    2592           0 :                 rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    2593           0 :                 if (rc < 0) {
    2594           0 :                         ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2595           0 :                         break;
    2596             :                 }
    2597             :         }
    2598             : 
    2599          21 :         if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
    2600           1 :                 nvme_active_ns_ctx_destroy(ctx);
    2601           1 :                 return -ENXIO;
    2602             :         }
    2603             : 
    2604          20 :         assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
    2605          20 :         nvme_ctrlr_identify_active_ns_swap(ctrlr, ctx->new_ns_list, ctx->page_count * 1024);
    2606          20 :         nvme_active_ns_ctx_destroy(ctx);
    2607             : 
    2608          20 :         return 0;
    2609             : }
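Once the swap has run, applications consume the result through the public iterators backed by the RB tree maintained above; a minimal sketch (the printf is illustrative only):

        static void
        example_list_active_ns(struct spdk_nvme_ctrlr *ctrlr)
        {
                uint32_t nsid;

                for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
                     nsid != 0;
                     nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
                        struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);

                        if (ns != NULL) {
                                printf("nsid %" PRIu32 ": %" PRIu64 " sectors\n",
                                       nsid, spdk_nvme_ns_get_num_sectors(ns));
                        }
                }
        }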
    2610             : 
    2611             : static void
    2612          21 : nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2613             : {
    2614          21 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2615          21 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2616             :         uint32_t nsid;
    2617             :         int rc;
    2618             : 
    2619          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2620           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2621           0 :                 return;
    2622             :         }
    2623             : 
    2624          21 :         nvme_ns_set_identify_data(ns);
    2625             : 
    2626             :         /* move on to the next active NS */
    2627          21 :         nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2628          21 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2629          21 :         if (ns == NULL) {
    2630           6 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
    2631           6 :                                      ctrlr->opts.admin_timeout_ms);
    2632           6 :                 return;
    2633             :         }
    2634          15 :         ns->ctrlr = ctrlr;
    2635          15 :         ns->id = nsid;
    2636             : 
    2637          15 :         rc = nvme_ctrlr_identify_ns_async(ns);
    2638          15 :         if (rc) {
    2639           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2640             :         }
    2641             : }
    2642             : 
    2643             : static int
    2644          21 : nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
    2645             : {
    2646          21 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2647             :         struct spdk_nvme_ns_data *nsdata;
    2648             : 
    2649          21 :         nsdata = &ns->nsdata;
    2650             : 
    2651          21 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
    2652          21 :                              ctrlr->opts.admin_timeout_ms);
    2653          21 :         return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id, 0,
    2654             :                                        nsdata, sizeof(*nsdata),
    2655             :                                        nvme_ctrlr_identify_ns_async_done, ns);
    2656             : }
    2657             : 
    2658             : static int
    2659          14 : nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    2660             : {
    2661             :         uint32_t nsid;
    2662             :         struct spdk_nvme_ns *ns;
    2663             :         int rc;
    2664             : 
    2665          14 :         nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2666          14 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2667          14 :         if (ns == NULL) {
    2668             :                 /* No active NS, move on to the next state */
    2669           8 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
    2670           8 :                                      ctrlr->opts.admin_timeout_ms);
    2671           8 :                 return 0;
    2672             :         }
    2673             : 
    2674           6 :         ns->ctrlr = ctrlr;
    2675           6 :         ns->id = nsid;
    2676             : 
    2677           6 :         rc = nvme_ctrlr_identify_ns_async(ns);
    2678           6 :         if (rc) {
    2679           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2680             :         }
    2681             : 
    2682           6 :         return rc;
    2683             : }
    2684             : 
    2685             : static int
    2686           4 : nvme_ctrlr_identify_namespaces_iocs_specific_next(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
    2687             : {
    2688             :         uint32_t nsid;
    2689             :         struct spdk_nvme_ns *ns;
    2690             :         int rc;
    2691             : 
    2692           4 :         if (!prev_nsid) {
    2693           2 :                 nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2694             :         } else {
    2695             :                 /* move on to the next active NS */
    2696           2 :                 nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, prev_nsid);
    2697             :         }
    2698             : 
    2699           4 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2700           4 :         if (ns == NULL) {
    2701             :                 /* No first/next active NS, move on to the next state */
    2702           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2703           1 :                                      ctrlr->opts.admin_timeout_ms);
    2704           1 :                 return 0;
    2705             :         }
    2706             : 
    2707             :         /* loop until we find a ns which has (supported) iocs specific data */
    2708          10 :         while (!nvme_ns_has_supported_iocs_specific_data(ns)) {
    2709           8 :                 nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2710           8 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2711           8 :                 if (ns == NULL) {
    2712             :                         /* no namespace with (supported) iocs specific data found */
    2713           1 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2714           1 :                                              ctrlr->opts.admin_timeout_ms);
    2715           1 :                         return 0;
    2716             :                 }
    2717             :         }
    2718             : 
    2719           2 :         rc = nvme_ctrlr_identify_ns_iocs_specific_async(ns);
    2720           2 :         if (rc) {
    2721           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2722             :         }
    2723             : 
    2724           2 :         return rc;
    2725             : }
    2726             : 
    2727             : static void
    2728           0 : nvme_ctrlr_identify_ns_zns_specific_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2729             : {
    2730           0 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2731           0 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2732             : 
    2733           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2734           0 :                 nvme_ns_free_zns_specific_data(ns);
    2735           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2736           0 :                 return;
    2737             :         }
    2738             : 
    2739           0 :         nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, ns->id);
    2740             : }
    2741             : 
    2742             : static int
    2743           2 : nvme_ctrlr_identify_ns_zns_specific_async(struct spdk_nvme_ns *ns)
    2744             : {
    2745           2 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2746             :         int rc;
    2747             : 
    2748           2 :         assert(!ns->nsdata_zns);
    2749           2 :         ns->nsdata_zns = spdk_zmalloc(sizeof(*ns->nsdata_zns), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
    2750             :                                       SPDK_MALLOC_SHARE);
    2751           2 :         if (!ns->nsdata_zns) {
    2752           0 :                 return -ENOMEM;
    2753             :         }
    2754             : 
    2755           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
    2756           2 :                              ctrlr->opts.admin_timeout_ms);
    2757           2 :         rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
    2758           2 :                                      ns->nsdata_zns, sizeof(*ns->nsdata_zns),
    2759             :                                      nvme_ctrlr_identify_ns_zns_specific_async_done, ns);
    2760           2 :         if (rc) {
    2761           1 :                 nvme_ns_free_zns_specific_data(ns);
    2762             :         }
    2763             : 
    2764           2 :         return rc;
    2765             : }
    2766             : 
    2767             : static void
    2768           0 : nvme_ctrlr_identify_ns_nvm_specific_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2769             : {
    2770           0 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2771           0 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2772             : 
    2773           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2774           0 :                 nvme_ns_free_nvm_specific_data(ns);
    2775           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2776           0 :                 return;
    2777             :         }
    2778             : 
    2779           0 :         nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, ns->id);
    2780             : }
    2781             : 
    2782             : static int
    2783           0 : nvme_ctrlr_identify_ns_nvm_specific_async(struct spdk_nvme_ns *ns)
    2784             : {
    2785           0 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2786             :         int rc;
    2787             : 
    2788           0 :         assert(!ns->nsdata_nvm);
    2789           0 :         ns->nsdata_nvm = spdk_zmalloc(sizeof(*ns->nsdata_nvm), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
    2790             :                                       SPDK_MALLOC_SHARE);
    2791           0 :         if (!ns->nsdata_nvm) {
    2792           0 :                 return -ENOMEM;
    2793             :         }
    2794             : 
    2795           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
    2796           0 :                              ctrlr->opts.admin_timeout_ms);
    2797           0 :         rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
    2798           0 :                                      ns->nsdata_nvm, sizeof(*ns->nsdata_nvm),
    2799             :                                      nvme_ctrlr_identify_ns_nvm_specific_async_done, ns);
    2800           0 :         if (rc) {
    2801           0 :                 nvme_ns_free_nvm_specific_data(ns);
    2802             :         }
    2803             : 
    2804           0 :         return rc;
    2805             : }
    2806             : 
    2807             : static int
    2808           2 : nvme_ctrlr_identify_ns_iocs_specific_async(struct spdk_nvme_ns *ns)
    2809             : {
    2810           2 :         switch (ns->csi) {
    2811           2 :         case SPDK_NVME_CSI_ZNS:
    2812           2 :                 return nvme_ctrlr_identify_ns_zns_specific_async(ns);
    2813           0 :         case SPDK_NVME_CSI_NVM:
    2814           0 :                 if (ns->ctrlr->cdata.ctratt.bits.elbas) {
    2815           0 :                         return nvme_ctrlr_identify_ns_nvm_specific_async(ns);
    2816             :                 }
    2817             :         /* fallthrough */
    2818             :         default:
    2819             :                 /*
    2820             :                  * This switch must handle all cases for which
    2821             :                  * nvme_ns_has_supported_iocs_specific_data() returns true,
    2822             :                  * other cases should never happen.
    2823             :                  */
    2824           0 :                 assert(0);
    2825             :         }
    2826             : 
    2827             :         return -EINVAL;
    2828             : }
    2829             : 
    2830             : static int
    2831          14 : nvme_ctrlr_identify_namespaces_iocs_specific(struct spdk_nvme_ctrlr *ctrlr)
    2832             : {
    2833          14 :         if (!nvme_ctrlr_multi_iocs_enabled(ctrlr)) {
    2834             :                 /* Multi IOCS not supported/enabled, move on to the next state */
    2835          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2836          14 :                                      ctrlr->opts.admin_timeout_ms);
    2837          14 :                 return 0;
    2838             :         }
    2839             : 
    2840           0 :         return nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, 0);
    2841             : }
    2842             : 
    2843             : static void
    2844           6 : nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2845             : {
    2846           6 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2847           6 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2848             :         uint32_t nsid;
    2849             :         int rc;
    2850             : 
    2851           6 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2852             :                 /*
    2853             :                  * Many controllers claim to be compatible with NVMe 1.3; however,
    2854             :                  * they do not implement the NS ID Desc List. Therefore, instead of setting
    2855             :                  * the state to NVME_CTRLR_STATE_ERROR, silently ignore the completion
    2856             :                  * error and move on to the next state.
    2857             :                  *
    2858             :                  * The proper way is to create a new quirk for controllers that violate
    2859             :                  * the NVMe 1.3 spec by not supporting NS ID Desc List.
    2860             :                  * (Re-using the NVME_QUIRK_IDENTIFY_CNS quirk is not possible, since
    2861             :                  * it is too generic and was added in order to handle controllers that
    2862             :                  * violate the NVMe 1.1 spec by not supporting ACTIVE LIST).
    2863             :                  */
    2864           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2865           0 :                                      ctrlr->opts.admin_timeout_ms);
    2866           0 :                 return;
    2867             :         }
    2868             : 
    2869           6 :         nvme_ns_set_id_desc_list_data(ns);
    2870             : 
    2871             :         /* move on to the next active NS */
    2872           6 :         nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2873           6 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2874           6 :         if (ns == NULL) {
    2875           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2876           2 :                                      ctrlr->opts.admin_timeout_ms);
    2877           2 :                 return;
    2878             :         }
    2879             : 
    2880           4 :         rc = nvme_ctrlr_identify_id_desc_async(ns);
    2881           4 :         if (rc) {
    2882           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2883             :         }
    2884             : }
    2885             : 
    2886             : static int
    2887           6 : nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
    2888             : {
    2889           6 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2890             : 
    2891           6 :         memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
    2892             : 
    2893           6 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
    2894           6 :                              ctrlr->opts.admin_timeout_ms);
    2895           6 :         return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
    2896           6 :                                        0, ns->id, 0, ns->id_desc_list, sizeof(ns->id_desc_list),
    2897             :                                        nvme_ctrlr_identify_id_desc_async_done, ns);
    2898             : }
    2899             : 
    2900             : static int
    2901          14 : nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    2902             : {
    2903             :         uint32_t nsid;
    2904             :         struct spdk_nvme_ns *ns;
    2905             :         int rc;
    2906             : 
    2907          14 :         if ((ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) &&
    2908          12 :              !(ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS)) ||
    2909           2 :             (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
    2910          12 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
    2911             :                 /* NS ID Desc List not supported, move on to the next state */
    2912          12 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2913          12 :                                      ctrlr->opts.admin_timeout_ms);
    2914          12 :                 return 0;
    2915             :         }
    2916             : 
    2917           2 :         nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2918           2 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2919           2 :         if (ns == NULL) {
    2920             :                 /* No active NS, move on to the next state */
    2921           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2922           0 :                                      ctrlr->opts.admin_timeout_ms);
    2923           0 :                 return 0;
    2924             :         }
    2925             : 
    2926           2 :         rc = nvme_ctrlr_identify_id_desc_async(ns);
    2927           2 :         if (rc) {
    2928           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2929             :         }
    2930             : 
    2931           2 :         return rc;
    2932             : }
    2933             : 
    2934             : static void
    2935          19 : nvme_ctrlr_update_nvmf_ioccsz(struct spdk_nvme_ctrlr *ctrlr)
    2936             : {
    2937          19 :         if (spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
    2938           4 :                 if (ctrlr->cdata.nvmf_specific.ioccsz < 4) {
    2939           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Incorrect IOCCSZ %u, the minimum value should be 4\n",
    2940             :                                           ctrlr->cdata.nvmf_specific.ioccsz);
    2941           0 :                         ctrlr->cdata.nvmf_specific.ioccsz = 4;
    2942           0 :                         assert(0);
    2943             :                 }
    2944           4 :                 ctrlr->ioccsz_bytes = ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd);
    2945           4 :                 ctrlr->icdoff = ctrlr->cdata.nvmf_specific.icdoff;
    2946             :         }
    2947          19 : }
    2948             : 
    2949             : static void
    2950          19 : nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2951             : {
    2952             :         uint32_t cq_allocated, sq_allocated, min_allocated, i;
    2953          19 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2954             : 
    2955          19 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2956           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set Features - Number of Queues failed!\n");
    2957           0 :                 ctrlr->opts.num_io_queues = 0;
    2958             :         } else {
    2959             :                 /*
    2960             :                  * Data in cdw0 is 0-based.
    2961             :                  * Lower 16-bits indicate number of submission queues allocated.
    2962             :                  * Upper 16-bits indicate number of completion queues allocated.
    2963             :                  */
    2964          19 :                 sq_allocated = (cpl->cdw0 & 0xFFFF) + 1;
    2965          19 :                 cq_allocated = (cpl->cdw0 >> 16) + 1;
    2966             : 
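                     :                 /*
                     :                  * Worked example (illustrative): cdw0 == 0x00030007 decodes to
                     :                  * sq_allocated = 0x0007 + 1 = 8 and cq_allocated = 0x0003 + 1 = 4,
                     :                  * so min_allocated below is 4.
                     :                  */
                     : 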
    2967             :                 /*
    2968             :                  * For 1:1 queue mapping, set number of allocated queues to be minimum of
    2969             :                  * submission and completion queues.
    2970             :                  */
    2971          19 :                 min_allocated = spdk_min(sq_allocated, cq_allocated);
    2972             : 
    2973             :                 /* Set number of queues to be minimum of requested and actually allocated. */
    2974          19 :                 ctrlr->opts.num_io_queues = spdk_min(min_allocated, ctrlr->opts.num_io_queues);
    2975             : 
    2976          19 :                 if (ctrlr->opts.enable_interrupts) {
    2977           0 :                         ctrlr->opts.num_io_queues = spdk_min(MAX_IO_QUEUES_WITH_INTERRUPTS,
    2978             :                                                              ctrlr->opts.num_io_queues);
    2979           0 :                         if (nvme_transport_ctrlr_enable_interrupts(ctrlr) < 0) {
    2980           0 :                                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to enable interrupts!\n");
    2981           0 :                                 ctrlr->opts.enable_interrupts = false;
    2982             :                         }
    2983             :                 }
    2984             :         }
    2985             : 
    2986          19 :         ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
    2987          19 :         if (ctrlr->free_io_qids == NULL) {
    2988           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2989           0 :                 return;
    2990             :         }
    2991             : 
    2992             :         /* Initialize list of free I/O queue IDs. QID 0 is the admin queue (implicitly allocated). */
    2993          69 :         for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
    2994          50 :                 spdk_nvme_ctrlr_free_qid(ctrlr, i);
    2995             :         }
    2996             : 
    2997          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
    2998          19 :                              ctrlr->opts.admin_timeout_ms);
    2999             : }
    3000             : 
    3001             : static int
    3002          19 : nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
    3003             : {
    3004             :         int rc;
    3005             : 
    3006          19 :         if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
    3007           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "Limiting requested num_io_queues %u to max %d\n",
    3008             :                                      ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
    3009           0 :                 ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
    3010          19 :         } else if (ctrlr->opts.num_io_queues < 1) {
    3011          13 :                 NVME_CTRLR_NOTICELOG(ctrlr, "Requested num_io_queues 0, increasing to 1\n");
    3012          13 :                 ctrlr->opts.num_io_queues = 1;
    3013             :         }
    3014             : 
    3015          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
    3016          19 :                              ctrlr->opts.admin_timeout_ms);
    3017             : 
    3018          19 :         rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
    3019             :                                            nvme_ctrlr_set_num_queues_done, ctrlr);
    3020          19 :         if (rc != 0) {
    3021           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3022           0 :                 return rc;
    3023             :         }
    3024             : 
    3025          19 :         return 0;
    3026             : }
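                     : 
                     : /*
                     :  * For reference (per the NVMe spec): Set Features - Number of Queues packs
                     :  * two 0-based counts into cdw11 (NSQR in the lower 16 bits, NCQR in the
                     :  * upper 16 bits), so requesting 32 queues encodes cdw11 == (31 << 16) | 31.
                     :  * nvme_ctrlr_cmd_set_num_queues() builds that command from num_io_queues.
                     :  */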
    3027             : 
    3028             : static void
    3029           3 : nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
    3030             : {
    3031             :         uint32_t keep_alive_interval_us;
    3032           3 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    3033             : 
    3034           3 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3035           2 :                 if ((cpl->status.sct == SPDK_NVME_SCT_GENERIC) &&
    3036           2 :                     (cpl->status.sc == SPDK_NVME_SC_INVALID_FIELD)) {
    3037           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Keep alive timeout Get Feature is not supported\n");
    3038             :                 } else {
    3039           1 :                         NVME_CTRLR_ERRLOG(ctrlr, "Keep alive timeout Get Feature failed: SC %x SCT %x\n",
    3040             :                                           cpl->status.sc, cpl->status.sct);
    3041           1 :                         ctrlr->opts.keep_alive_timeout_ms = 0;
    3042           1 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3043           1 :                         return;
    3044             :                 }
    3045             :         } else {
    3046           1 :                 if (ctrlr->opts.keep_alive_timeout_ms != cpl->cdw0) {
    3047           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Controller adjusted keep alive timeout to %u ms\n",
    3048             :                                             cpl->cdw0);
    3049             :                 }
    3050             : 
    3051           1 :                 ctrlr->opts.keep_alive_timeout_ms = cpl->cdw0;
    3052             :         }
    3053             : 
    3054           2 :         if (ctrlr->opts.keep_alive_timeout_ms == 0) {
    3055           0 :                 ctrlr->keep_alive_interval_ticks = 0;
    3056             :         } else {
    3057           2 :                 keep_alive_interval_us = ctrlr->opts.keep_alive_timeout_ms * 1000 / 2;
    3058             : 
    3059           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Sending keep alive every %u us\n", keep_alive_interval_us);
    3060             : 
    3061           2 :                 ctrlr->keep_alive_interval_ticks = (keep_alive_interval_us * spdk_get_ticks_hz()) /
    3062             :                                                    UINT64_C(1000000);
    3063             : 
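                     :                 /*
                     :                  * Worked example (illustrative): keep_alive_timeout_ms == 10000 with
                     :                  * a 2 GHz tick rate gives keep_alive_interval_us == 5000000 and
                     :                  * keep_alive_interval_ticks == 5000000 * 2000000000 / 1000000 ==
                     :                  * 10000000000, i.e. one keep alive every 5 seconds.
                     :                  */
                     : 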
    3064             :                 /* Schedule the first Keep Alive to be sent as soon as possible. */
    3065           2 :                 ctrlr->next_keep_alive_tick = spdk_get_ticks();
    3066             :         }
    3067             : 
    3068           2 :         if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    3069           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    3070             :         } else {
    3071           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    3072           2 :                                      ctrlr->opts.admin_timeout_ms);
    3073             :         }
    3074             : }
    3075             : 
    3076             : static int
    3077          22 : nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
    3078             : {
    3079             :         int rc;
    3080             : 
    3081          22 :         if (ctrlr->opts.keep_alive_timeout_ms == 0) {
    3082          19 :                 if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    3083           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    3084             :                 } else {
    3085          19 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    3086          19 :                                              ctrlr->opts.admin_timeout_ms);
    3087             :                 }
    3088          19 :                 return 0;
    3089             :         }
    3090             : 
    3091             :         /* Note: Discovery controller identify data does not populate KAS according to spec. */
    3092           3 :         if (!spdk_nvme_ctrlr_is_discovery(ctrlr) && ctrlr->cdata.kas == 0) {
    3093           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Controller KAS is 0 - not enabling Keep Alive\n");
    3094           0 :                 ctrlr->opts.keep_alive_timeout_ms = 0;
    3095           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    3096           0 :                                      ctrlr->opts.admin_timeout_ms);
    3097           0 :                 return 0;
    3098             :         }
    3099             : 
    3100           3 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
    3101           3 :                              ctrlr->opts.admin_timeout_ms);
    3102             : 
    3103             :         /* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
    3104           3 :         rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
    3105             :                                              nvme_ctrlr_set_keep_alive_timeout_done, ctrlr);
    3106           3 :         if (rc != 0) {
    3107           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Keep alive timeout Get Feature failed: %d\n", rc);
    3108           0 :                 ctrlr->opts.keep_alive_timeout_ms = 0;
    3109           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3110           0 :                 return rc;
    3111             :         }
    3112             : 
    3113           3 :         return 0;
    3114             : }
    3115             : 
    3116             : static void
    3117           0 : nvme_ctrlr_set_host_id_done(void *arg, const struct spdk_nvme_cpl *cpl)
    3118             : {
    3119           0 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    3120             : 
    3121           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3122             :                 /*
    3123             :                  * Treat Set Features - Host ID failure as non-fatal, since the Host ID feature
    3124             :                  * is optional.
    3125             :                  */
    3126           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
    3127             :                                    cpl->status.sc, cpl->status.sct);
    3128             :         } else {
    3129           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Set Features - Host ID was successful\n");
    3130             :         }
    3131             : 
    3132           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    3133           0 : }
    3134             : 
    3135             : static int
    3136          14 : nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
    3137             : {
    3138             :         uint8_t *host_id;
    3139             :         uint32_t host_id_size;
    3140             :         int rc;
    3141             : 
    3142          14 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    3143             :                 /*
    3144             :                  * NVMe-oF sends the host ID during Connect and doesn't allow
    3145             :                  * Set Features - Host Identifier after Connect, so we don't need to do anything here.
    3146             :                  */
    3147          14 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "NVMe-oF transport - not sending Set Features - Host ID\n");
    3148          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    3149          14 :                 return 0;
    3150             :         }
    3151             : 
    3152           0 :         if (ctrlr->cdata.ctratt.bits.host_id_exhid_supported) {
    3153           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Using 128-bit extended host identifier\n");
    3154           0 :                 host_id = ctrlr->opts.extended_host_id;
    3155           0 :                 host_id_size = sizeof(ctrlr->opts.extended_host_id);
    3156             :         } else {
    3157           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Using 64-bit host identifier\n");
    3158           0 :                 host_id = ctrlr->opts.host_id;
    3159           0 :                 host_id_size = sizeof(ctrlr->opts.host_id);
    3160             :         }
    3161             : 
    3162             :         /* If the user specified an all-zeroes host identifier, don't send the command. */
    3163           0 :         if (spdk_mem_all_zero(host_id, host_id_size)) {
    3164           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "User did not specify host ID - not sending Set Features - Host ID\n");
    3165           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    3166           0 :                 return 0;
    3167             :         }
    3168             : 
    3169           0 :         SPDK_LOGDUMP(nvme, "host_id", host_id, host_id_size);
    3170             : 
    3171           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
    3172           0 :                              ctrlr->opts.admin_timeout_ms);
    3173             : 
    3174           0 :         rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
    3175           0 :         if (rc != 0) {
    3176           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set Features - Host ID failed: %d\n", rc);
    3177           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3178           0 :                 return rc;
    3179             :         }
    3180             : 
    3181           0 :         return 0;
    3182             : }
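                     : 
                     : /*
                     :  * Hedged sketch: for PCIe controllers, the host identifier sent above comes
                     :  * from the probe-time options.  An application could seed it before attach,
                     :  * for example (illustrative only):
                     :  */
                     : #if 0
                     : struct spdk_nvme_ctrlr_opts opts;
                     : 
                     : spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
                     : memset(opts.extended_host_id, 0xab, sizeof(opts.extended_host_id));
                     : /* ... then pass &opts when connecting/probing the controller ... */
                     : #endif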
    3183             : 
    3184             : void
    3185           4 : nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    3186             : {
    3187             :         uint32_t nsid;
    3188             :         struct spdk_nvme_ns *ns;
    3189             : 
    3190           4 :         for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    3191          19 :              nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
    3192          15 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    3193          15 :                 nvme_ns_construct(ns, nsid, ctrlr);
    3194             :         }
    3195           4 : }
    3196             : 
    3197             : static int
    3198           4 : nvme_ctrlr_clear_changed_ns_log(struct spdk_nvme_ctrlr *ctrlr)
    3199             : {
    3200             :         struct nvme_completion_poll_status      *status;
    3201           4 :         int             rc = -ENOMEM;
    3202           4 :         char            *buffer = NULL;
    3203             :         uint32_t        nsid;
    3204           4 :         size_t          buf_size = (SPDK_NVME_MAX_CHANGED_NAMESPACES * sizeof(uint32_t));
    3205             : 
    3206           4 :         if (ctrlr->opts.disable_read_changed_ns_list_log_page) {
    3207           0 :                 return 0;
    3208             :         }
    3209             : 
    3210           4 :         buffer = spdk_dma_zmalloc(buf_size, 4096, NULL);
    3211           4 :         if (!buffer) {
    3212           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate buffer for getting "
    3213             :                                   "changed ns log.\n");
    3214           0 :                 return rc;
    3215             :         }
    3216             : 
    3217           4 :         status = calloc(1, sizeof(*status));
    3218           4 :         if (!status) {
    3219           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    3220           0 :                 goto free_buffer;
    3221             :         }
    3222             : 
    3223           4 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr,
    3224             :                                               SPDK_NVME_LOG_CHANGED_NS_LIST,
    3225             :                                               SPDK_NVME_GLOBAL_NS_TAG,
    3226             :                                               buffer, buf_size, 0,
    3227             :                                               nvme_completion_poll_cb, status);
    3228             : 
    3229           4 :         if (rc) {
    3230           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_get_log_page() failed: rc=%d\n", rc);
    3231           0 :                 free(status);
    3232           0 :                 goto free_buffer;
    3233             :         }
    3234             : 
    3235           4 :         rc = nvme_wait_for_completion_timeout(ctrlr->adminq, status,
    3236           4 :                                               ctrlr->opts.admin_timeout_ms * 1000);
    3237           4 :         if (!status->timed_out) {
    3238           4 :                 free(status);
    3239             :         }
    3240             : 
    3241           4 :         if (rc) {
    3242           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "wait for spdk_nvme_ctrlr_cmd_get_log_page failed: rc=%d\n", rc);
    3243           0 :                 goto free_buffer;
    3244             :         }
    3245             : 
    3246             :         /* only check the case of overflow. */
    3247           4 :         nsid = from_le32(buffer);
    3248           4 :         if (nsid == 0xffffffffu) {
    3249           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "changed ns log overflowed.\n");
    3250             :         }
    3251             : 
    3252           4 : free_buffer:
    3253           4 :         spdk_dma_free(buffer);
    3254           4 :         return rc;
    3255             : }
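                     : 
                     : /*
                     :  * For reference (per the NVMe spec): the Changed Namespace List log page is
                     :  * an array of up to 1024 little-endian 32-bit NSIDs; a first entry of
                     :  * 0xFFFFFFFF means more than 1024 namespaces changed, which is the overflow
                     :  * case checked above.
                     :  */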
    3256             : 
    3257             : static void
    3258           5 : nvme_ctrlr_process_async_event(struct spdk_nvme_ctrlr *ctrlr,
    3259             :                                const struct spdk_nvme_cpl *cpl)
    3260             : {
    3261             :         union spdk_nvme_async_event_completion event;
    3262             :         struct spdk_nvme_ctrlr_process *active_proc;
    3263             :         int rc;
    3264             : 
    3265           5 :         event.raw = cpl->cdw0;
    3266             : 
    3267           5 :         if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
    3268           5 :             (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
    3269           4 :                 nvme_ctrlr_clear_changed_ns_log(ctrlr);
    3270             : 
    3271           4 :                 rc = nvme_ctrlr_identify_active_ns(ctrlr);
    3272           4 :                 if (rc) {
    3273           0 :                         return;
    3274             :                 }
    3275           4 :                 nvme_ctrlr_update_namespaces(ctrlr);
    3276           4 :                 nvme_io_msg_ctrlr_update(ctrlr);
    3277             :         }
    3278             : 
    3279           5 :         if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
    3280           5 :             (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE)) {
    3281           1 :                 if (!ctrlr->opts.disable_read_ana_log_page) {
    3282           1 :                         rc = nvme_ctrlr_update_ana_log_page(ctrlr);
    3283           1 :                         if (rc) {
    3284           0 :                                 return;
    3285             :                         }
    3286           1 :                         nvme_ctrlr_parse_ana_log_page(ctrlr, nvme_ctrlr_update_ns_ana_states,
    3287             :                                                       ctrlr);
    3288             :                 }
    3289             :         }
    3290             : 
    3291           5 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3292           5 :         if (active_proc && active_proc->aer_cb_fn) {
    3293           3 :                 active_proc->aer_cb_fn(active_proc->aer_cb_arg, cpl);
    3294             :         }
    3295             : }
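                     : 
                     : /*
                     :  * Hedged usage sketch: the aer_cb_fn invoked above is the per-process
                     :  * callback an application registers through the public API.  my_aer_cb is
                     :  * a hypothetical handler, shown only to illustrate the flow:
                     :  */
                     : #if 0
                     : static void
                     : my_aer_cb(void *arg, const struct spdk_nvme_cpl *cpl)
                     : {
                     :         union spdk_nvme_async_event_completion event;
                     : 
                     :         event.raw = cpl->cdw0;
                     :         printf("AER: type %u, info %u\n", event.bits.async_event_type,
                     :                event.bits.async_event_info);
                     : }
                     : 
                     : /* After attaching to the controller: */
                     : spdk_nvme_ctrlr_register_aer_callback(ctrlr, my_aer_cb, NULL);
                     : #endif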
    3296             : 
    3297             : static void
    3298           5 : nvme_ctrlr_queue_async_event(struct spdk_nvme_ctrlr *ctrlr,
    3299             :                              const struct spdk_nvme_cpl *cpl)
    3300             : {
    3301             :         struct  spdk_nvme_ctrlr_aer_completion *nvme_event;
    3302             :         struct spdk_nvme_ctrlr_process *proc;
    3303             : 
    3304             :         /* Add async event to each process objects event list */
    3305          10 :         TAILQ_FOREACH(proc, &ctrlr->active_procs, tailq) {
    3306             :                 /* Must be shared memory so other processes can access */
    3307           5 :                 nvme_event = spdk_zmalloc(sizeof(*nvme_event), 0, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
    3308           5 :                 if (!nvme_event) {
    3309           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Alloc nvme event failed, ignore the event\n");
    3310           0 :                         return;
    3311             :                 }
    3312           5 :                 nvme_event->cpl = *cpl;
    3313             : 
    3314           5 :                 STAILQ_INSERT_TAIL(&proc->async_events, nvme_event, link);
    3315             :         }
    3316             : }
    3317             : 
    3318             : static void
    3319           5 : nvme_ctrlr_complete_queued_async_events(struct spdk_nvme_ctrlr *ctrlr)
    3320             : {
    3321             :         struct  spdk_nvme_ctrlr_aer_completion  *nvme_event, *nvme_event_tmp;
    3322             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3323             : 
    3324           5 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3325             : 
    3326          10 :         STAILQ_FOREACH_SAFE(nvme_event, &active_proc->async_events, link, nvme_event_tmp) {
    3327           5 :                 STAILQ_REMOVE(&active_proc->async_events, nvme_event,
    3328             :                               spdk_nvme_ctrlr_aer_completion, link);
    3329           5 :                 nvme_ctrlr_process_async_event(ctrlr, &nvme_event->cpl);
    3330           5 :                 spdk_free(nvme_event);
    3331             : 
    3332             :         }
    3333           5 : }
    3334             : 
    3335             : static void
    3336           5 : nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
    3337             : {
    3338           5 :         struct nvme_async_event_request *aer = arg;
    3339           5 :         struct spdk_nvme_ctrlr          *ctrlr = aer->ctrlr;
    3340             : 
    3341           5 :         if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
    3342           5 :             cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
    3343             :                 /*
    3344             :                  *  This is simulated when controller is being shut down, to
    3345             :                  *  effectively abort outstanding asynchronous event requests
    3346             :                  *  and make sure all memory is freed.  Do not repost the
    3347             :                  *  request in this case.
    3348             :                  */
    3349           0 :                 return;
    3350             :         }
    3351             : 
    3352           5 :         if (cpl->status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
    3353           0 :             cpl->status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED) {
    3354             :                 /*
    3355             :                  *  SPDK will only send as many AERs as the device says it supports,
    3356             :                  *  so this status code indicates an out-of-spec device.  Do not repost
    3357             :                  *  the request in this case.
    3358             :                  */
    3359           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Controller appears out-of-spec for asynchronous event request "
    3360             :                                   "handling.  Do not repost this AER.\n");
    3361           0 :                 return;
    3362             :         }
    3363             : 
    3364             :         /* Queue the event on each process's event list */
    3365           5 :         nvme_ctrlr_queue_async_event(ctrlr, cpl);
    3366             : 
    3367             :         /* If the ctrlr was removed or is in the destruct state, do not resubmit the AER */
    3368           5 :         if (ctrlr->is_removed || ctrlr->is_destructed) {
    3369           0 :                 return;
    3370             :         }
    3371             : 
    3372             :         /*
    3373             :          * Repost another asynchronous event request to replace the one
    3374             :          *  that just completed.
    3375             :          */
    3376           5 :         if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
    3377             :                 /*
    3378             :                  * We can't do anything to recover from a failure here,
    3379             :                  * so just print a warning message and leave the AER unsubmitted.
    3380             :                  */
    3381           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "resubmitting AER failed!\n");
    3382             :         }
    3383             : }
    3384             : 
    3385             : static int
    3386          24 : nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
    3387             :                                     struct nvme_async_event_request *aer)
    3388             : {
    3389             :         struct nvme_request *req;
    3390             : 
    3391          24 :         aer->ctrlr = ctrlr;
    3392          24 :         req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
    3393          24 :         aer->req = req;
    3394          24 :         if (req == NULL) {
    3395           0 :                 return -1;
    3396             :         }
    3397             : 
    3398          24 :         req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
    3399          24 :         return nvme_ctrlr_submit_admin_request(ctrlr, req);
    3400             : }
    3401             : 
    3402             : static void
    3403          19 : nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
    3404             : {
    3405             :         struct nvme_async_event_request         *aer;
    3406             :         int                                     rc;
    3407             :         uint32_t                                i;
    3408          19 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    3409             : 
    3410          19 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3411           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "nvme_ctrlr_configure_aer failed!\n");
    3412           0 :                 ctrlr->num_aers = 0;
    3413             :         } else {
    3414             :                 /* aerl is a zero-based value, so we need to add 1 here. */
    3415          19 :                 ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
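                     :                 /*
                     :                  * E.g. (illustrative): cdata.aerl == 3 advertises up to 4
                     :                  * outstanding AERs, so num_aers == 4 unless capped by
                     :                  * NVME_MAX_ASYNC_EVENTS.
                     :                  */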
    3416             :         }
    3417             : 
    3418          38 :         for (i = 0; i < ctrlr->num_aers; i++) {
    3419          19 :                 aer = &ctrlr->aer[i];
    3420          19 :                 rc = nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
    3421          19 :                 if (rc) {
    3422           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_construct_and_submit_aer failed!\n");
    3423           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3424           0 :                         return;
    3425             :                 }
    3426             :         }
    3427          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, ctrlr->opts.admin_timeout_ms);
    3428             : }
    3429             : 
    3430             : static int
    3431          19 : nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
    3432             : {
    3433             :         union spdk_nvme_feat_async_event_configuration  config;
    3434             :         int                                             rc;
    3435             : 
    3436          19 :         config.raw = 0;
    3437             : 
    3438          19 :         if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    3439           0 :                 config.bits.discovery_log_change_notice = 1;
    3440             :         } else {
    3441          19 :                 config.bits.crit_warn.bits.available_spare = 1;
    3442          19 :                 config.bits.crit_warn.bits.temperature = 1;
    3443          19 :                 config.bits.crit_warn.bits.device_reliability = 1;
    3444          19 :                 config.bits.crit_warn.bits.read_only = 1;
    3445          19 :                 config.bits.crit_warn.bits.volatile_memory_backup = 1;
    3446             : 
    3447          19 :                 if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 2, 0)) {
    3448           4 :                         if (ctrlr->cdata.oaes.ns_attribute_notices) {
    3449           0 :                                 config.bits.ns_attr_notice = 1;
    3450             :                         }
    3451           4 :                         if (ctrlr->cdata.oaes.fw_activation_notices) {
    3452           0 :                                 config.bits.fw_activation_notice = 1;
    3453             :                         }
    3454           4 :                         if (ctrlr->cdata.oaes.ana_change_notices) {
    3455           0 :                                 config.bits.ana_change_notice = 1;
    3456             :                         }
    3457             :                 }
    3458          19 :                 if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) && ctrlr->cdata.lpa.telemetry) {
    3459           0 :                         config.bits.telemetry_log_notice = 1;
    3460             :                 }
    3461             :         }
    3462             : 
    3463          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
    3464          19 :                              ctrlr->opts.admin_timeout_ms);
    3465             : 
    3466          19 :         rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
    3467             :                         nvme_ctrlr_configure_aer_done,
    3468             :                         ctrlr);
    3469          19 :         if (rc != 0) {
    3470           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3471           0 :                 return rc;
    3472             :         }
    3473             : 
    3474          19 :         return 0;
    3475             : }
    3476             : 
    3477             : struct spdk_nvme_ctrlr_process *
    3478          61 : nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr, pid_t pid)
    3479             : {
    3480             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3481             : 
    3482          61 :         TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
    3483          12 :                 if (active_proc->pid == pid) {
    3484          12 :                         return active_proc;
    3485             :                 }
    3486             :         }
    3487             : 
    3488          49 :         return NULL;
    3489             : }
    3490             : 
    3491             : struct spdk_nvme_ctrlr_process *
    3492          57 : nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr)
    3493             : {
    3494          57 :         return nvme_ctrlr_get_process(ctrlr, getpid());
    3495             : }
    3496             : 
    3497             : /**
    3498             :  * This function will be called when a process is using the controller.
    3499             :  *  1. For the primary process, it is called when constructing the controller.
    3500             :  *  2. For a secondary process, it is called when probing the controller.
    3501             :  * Note: this checks whether the process has already been added.
    3502             :  */
    3503             : int
    3504           4 : nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
    3505             : {
    3506             :         struct spdk_nvme_ctrlr_process  *ctrlr_proc;
    3507           4 :         pid_t                           pid = getpid();
    3508             : 
    3509             :         /* Check whether the process is already added or not */
    3510           4 :         if (nvme_ctrlr_get_process(ctrlr, pid)) {
    3511           0 :                 return 0;
    3512             :         }
    3513             : 
    3514             :         /* Initialize the per process properties for this ctrlr */
    3515           4 :         ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process),
    3516             :                                   64, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
    3517           4 :         if (ctrlr_proc == NULL) {
    3518           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "failed to allocate memory to track the process props\n");
    3519             : 
    3520           0 :                 return -1;
    3521             :         }
    3522             : 
    3523           4 :         ctrlr_proc->is_primary = spdk_process_is_primary();
    3524           4 :         ctrlr_proc->pid = pid;
    3525           4 :         STAILQ_INIT(&ctrlr_proc->active_reqs);
    3526           4 :         ctrlr_proc->devhandle = devhandle;
    3527           4 :         ctrlr_proc->ref = 0;
    3528           4 :         TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
    3529           4 :         STAILQ_INIT(&ctrlr_proc->async_events);
    3530             : 
    3531           4 :         TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
    3532             : 
    3533           4 :         return 0;
    3534             : }
    3535             : 
    3536             : /**
    3537             :  * This function will be called when the process detaches the controller.
    3538             :  * Note: the ctrlr_lock must be held when calling this function.
    3539             :  */
    3540             : static void
    3541           1 : nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
    3542             :                           struct spdk_nvme_ctrlr_process *proc)
    3543             : {
    3544             :         struct spdk_nvme_qpair  *qpair, *tmp_qpair;
    3545             : 
    3546           1 :         assert(STAILQ_EMPTY(&proc->active_reqs));
    3547             : 
    3548           1 :         TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
    3549           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    3550             :         }
    3551             : 
    3552           1 :         TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
    3553             : 
    3554           1 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    3555           1 :                 spdk_pci_device_detach(proc->devhandle);
    3556             :         }
    3557             : 
    3558           1 :         spdk_free(proc);
    3559           1 : }
    3560             : 
    3561             : /**
    3562             :  * This function will be called when a process has exited unexpectedly,
    3563             :  *  in order to free any incomplete nvme requests, allocated IO qpairs,
    3564             :  *  and allocated memory.
    3565             :  * Note: the ctrlr_lock must be held when calling this function.
    3566             :  */
    3567             : static void
    3568           0 : nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
    3569             : {
    3570             :         struct nvme_request     *req, *tmp_req;
    3571             :         struct spdk_nvme_qpair  *qpair, *tmp_qpair;
    3572             :         struct spdk_nvme_ctrlr_aer_completion *event;
    3573             : 
    3574           0 :         STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
    3575           0 :                 STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
    3576             : 
    3577           0 :                 assert(req->pid == proc->pid);
    3578           0 :                 nvme_cleanup_user_req(req);
    3579           0 :                 nvme_free_request(req);
    3580             :         }
    3581             : 
    3582             :         /* Free any async events still queued on this process's event list */
    3583           0 :         while (!STAILQ_EMPTY(&proc->async_events)) {
    3584           0 :                 event = STAILQ_FIRST(&proc->async_events);
    3585           0 :                 STAILQ_REMOVE_HEAD(&proc->async_events, link);
    3586           0 :                 spdk_free(event);
    3587             :         }
    3588             : 
    3589           0 :         TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
    3590           0 :                 TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);
    3591             : 
    3592             :                 /*
    3593             :                  * The process may have been killed while some qpairs were in their
    3594             :                  *  completion context.  Clear that flag here to allow these IO
    3595             :                  *  qpairs to be deleted.
    3596             :                  */
    3597           0 :                 qpair->in_completion_context = 0;
    3598             : 
    3599           0 :                 qpair->no_deletion_notification_needed = 1;
    3600             : 
    3601           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    3602             :         }
    3603             : 
    3604           0 :         spdk_free(proc);
    3605           0 : }
    3606             : 
    3607             : /**
    3608             :  * This function will be called when destructing the controller.
    3609             :  *  1. There are no more admin requests on this controller.
    3610             :  *  2. Clean up any leftover resource allocations when the associated process is gone.
    3611             :  */
    3612             : void
    3613          50 : nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
    3614             : {
    3615             :         struct spdk_nvme_ctrlr_process  *active_proc, *tmp;
    3616             : 
    3617             :         /* Free all the processes' properties and make sure there are no pending admin IOs */
    3618          53 :         TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
    3619           3 :                 TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
    3620             : 
    3621           3 :                 assert(STAILQ_EMPTY(&active_proc->active_reqs));
    3622             : 
    3623           3 :                 spdk_free(active_proc);
    3624             :  *  detaches the controller in order to clean up those unexpectedly
    3625          50 : }
    3626             : 
    3627             : /**
    3628             :  * This function will be called when any other process attaches or
    3629             :  *  detaches the controller in order to cleanup those unexpectedly
    3630             :  *  terminated processes.
    3631             :  * Note: the ctrlr_lock must be held when calling this function.
    3632             :  */
    3633             : static int
    3634           0 : nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
    3635             : {
    3636             :         struct spdk_nvme_ctrlr_process  *active_proc, *tmp;
    3637           0 :         int                             active_proc_count = 0;
    3638             : 
    3639           0 :         TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
    3640           0 :                 if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
    3641           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "process %d terminated unexpectedly\n", active_proc->pid);
    3642             : 
    3643           0 :                         TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
    3644             : 
    3645           0 :                         nvme_ctrlr_cleanup_process(active_proc);
    3646             :                 } else {
    3647           0 :                         active_proc_count++;
    3648             :                 }
    3649             :         }
    3650             : 
    3651           0 :         return active_proc_count;
    3652             : }
    3653             : 
    3654             : void
    3655           0 : nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
    3656             : {
    3657             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3658             : 
    3659           0 :         nvme_ctrlr_lock(ctrlr);
    3660             : 
    3661           0 :         nvme_ctrlr_remove_inactive_proc(ctrlr);
    3662             : 
    3663           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3664           0 :         if (active_proc) {
    3665           0 :                 active_proc->ref++;
    3666             :         }
    3667             : 
    3668           0 :         nvme_ctrlr_unlock(ctrlr);
    3669           0 : }
    3670             : 
    3671             : void
    3672           0 : nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
    3673             : {
    3674             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3675             :         int                             proc_count;
    3676             : 
    3677           0 :         nvme_ctrlr_lock(ctrlr);
    3678             : 
    3679           0 :         proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);
    3680             : 
    3681           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3682           0 :         if (active_proc) {
    3683           0 :                 active_proc->ref--;
    3684           0 :                 assert(active_proc->ref >= 0);
    3685             : 
    3686             :                 /*
    3687             :                  * The last active process will be removed at the end of
    3688             :                  * the destruction of the controller.
    3689             :                  */
    3690           0 :                 if (active_proc->ref == 0 && proc_count != 1) {
    3691           0 :                         nvme_ctrlr_remove_process(ctrlr, active_proc);
    3692             :                 }
    3693             :         }
    3694             : 
    3695           0 :         nvme_ctrlr_unlock(ctrlr);
    3696           0 : }
    3697             : 
    3698             : int
    3699           0 : nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
    3700             : {
    3701             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3702           0 :         int                             ref = 0;
    3703             : 
    3704           0 :         nvme_ctrlr_lock(ctrlr);
    3705             : 
    3706           0 :         nvme_ctrlr_remove_inactive_proc(ctrlr);
    3707             : 
    3708           0 :         TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
    3709           0 :                 ref += active_proc->ref;
    3710             :         }
    3711             : 
    3712           0 :         nvme_ctrlr_unlock(ctrlr);
    3713             : 
    3714           0 :         return ref;
    3715             : }
    3716             : 
    3717             : /**
    3718             :  *  Get the PCI device handle which is only visible to its associated process.
    3719             :  */
    3720             : struct spdk_pci_device *
    3721           0 : nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
    3722             : {
    3723             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3724           0 :         struct spdk_pci_device          *devhandle = NULL;
    3725             : 
    3726           0 :         nvme_ctrlr_lock(ctrlr);
    3727             : 
    3728           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3729           0 :         if (active_proc) {
    3730           0 :                 devhandle = active_proc->devhandle;
    3731             :         }
    3732             : 
    3733           0 :         nvme_ctrlr_unlock(ctrlr);
    3734             : 
    3735           0 :         return devhandle;
    3736             : }
    3737             : 
    3738             : static void
    3739          21 : nvme_ctrlr_process_init_vs_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3740             : {
    3741          21 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3742             : 
    3743          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3744           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the VS register\n");
    3745           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3746           0 :                 return;
    3747             :         }
    3748             : 
    3749          21 :         assert(value <= UINT32_MAX);
    3750          21 :         ctrlr->vs.raw = (uint32_t)value;
    3751          21 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_CAP, NVME_TIMEOUT_INFINITE);
    3752             : }
    3753             : 
    3754             : static void
    3755          21 : nvme_ctrlr_process_init_cap_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3756             : {
    3757          21 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3758             : 
    3759          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3760           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CAP register\n");
    3761           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3762           0 :                 return;
    3763             :         }
    3764             : 
    3765          21 :         ctrlr->cap.raw = value;
    3766          21 :         nvme_ctrlr_init_cap(ctrlr);
    3767          21 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN, NVME_TIMEOUT_INFINITE);
    3768             : }
    3769             : 
    3770             : static void
    3771          22 : nvme_ctrlr_process_init_check_en(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3772             : {
    3773          22 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3774             :         enum nvme_ctrlr_state state;
    3775             : 
    3776          22 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3777           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    3778           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3779           0 :                 return;
    3780             :         }
    3781             : 
    3782          22 :         assert(value <= UINT32_MAX);
    3783          22 :         ctrlr->process_init_cc.raw = (uint32_t)value;
    3784             : 
    3785          22 :         if (ctrlr->process_init_cc.bits.en) {
    3786           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1\n");
    3787           2 :                 state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1;
    3788             :         } else {
    3789          20 :                 state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0;
    3790             :         }
    3791             : 
    3792          22 :         nvme_ctrlr_set_state(ctrlr, state, nvme_ctrlr_get_ready_timeout(ctrlr));
    3793             : }
    3794             : 
    3795             : static void
    3796           2 : nvme_ctrlr_process_init_set_en_0(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3797             : {
    3798           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3799             : 
    3800           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3801           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write the CC register\n");
    3802           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3803           0 :                 return;
    3804             :         }
    3805             : 
    3806             :         /*
    3807             :          * Wait 2.5 seconds before accessing PCI registers.
    3808             :          * Not using sleep() to avoid blocking other controller's initialization.
    3809             :          */
    3810           2 :         if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
    3811           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Applying quirk: delay 2.5 seconds before reading registers\n");
    3812           0 :                 ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2500 * spdk_get_ticks_hz() / 1000);
    3813             :         }
    3814             : 
    3815           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3816             :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    3817             : }
    3818             : 
    3819             : static void
    3820           2 : nvme_ctrlr_process_init_set_en_0_read_cc(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3821             : {
    3822           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3823             :         union spdk_nvme_cc_register cc;
    3824             :         int rc;
    3825             : 
    3826           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3827           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    3828           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3829           0 :                 return;
    3830             :         }
    3831             : 
    3832           2 :         assert(value <= UINT32_MAX);
    3833           2 :         cc.raw = (uint32_t)value;
    3834           2 :         cc.bits.en = 0;
    3835           2 :         ctrlr->process_init_cc.raw = cc.raw;
    3836             : 
    3837           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC,
    3838             :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    3839             : 
    3840           2 :         rc = nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_process_init_set_en_0, ctrlr);
    3841           2 :         if (rc != 0) {
    3842           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_cc() failed\n");
    3843           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3844             :         }
    3845             : }
    3846             : 
    3847             : static void
    3848           2 : nvme_ctrlr_process_init_wait_for_ready_1(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3849             : {
    3850           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3851             :         union spdk_nvme_csts_register csts;
    3852             : 
    3853           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3854             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3855             :                  * temporarily. Allow for this case.
    3856             :                  */
    3857           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3858           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3859           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
    3860             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3861             :                 } else {
    3862           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3863           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3864             :                 }
    3865             : 
    3866           0 :                 return;
    3867             :         }
    3868             : 
    3869           2 :         assert(value <= UINT32_MAX);
    3870           2 :         csts.raw = (uint32_t)value;
    3871           2 :         if (csts.bits.rdy == 1 || csts.bits.cfs == 1) {
    3872           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0,
    3873             :                                      nvme_ctrlr_get_ready_timeout(ctrlr));
    3874             :         } else {
    3875           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1 && CSTS.RDY = 0 - waiting for reset to complete\n");
    3876           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
    3877             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3878             :         }
    3879             : }
    3880             : 
    3881             : static void
    3882          22 : nvme_ctrlr_process_init_wait_for_ready_0(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3883             : {
    3884          22 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3885             :         union spdk_nvme_csts_register csts;
    3886             : 
    3887          22 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3888             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3889             :                  * temporarily. Allow for this case.
    3890             :                  */
    3891           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3892           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3893           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3894             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3895             :                 } else {
    3896           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3897           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3898             :                 }
    3899             : 
    3900           0 :                 return;
    3901             :         }
    3902             : 
    3903          22 :         assert(value <= UINT32_MAX);
    3904          22 :         csts.raw = (uint32_t)value;
    3905          22 :         if (csts.bits.rdy == 0) {
    3906          22 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 0 && CSTS.RDY = 0\n");
    3907          22 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLED,
    3908             :                                      nvme_ctrlr_get_ready_timeout(ctrlr));
    3909             :         } else {
    3910           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3911             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3912             :         }
    3913             : }
    3914             : 
    3915             : static void
    3916           9 : nvme_ctrlr_process_init_enable_wait_for_ready_1(void *ctx, uint64_t value,
    3917             :                 const struct spdk_nvme_cpl *cpl)
    3918             : {
    3919           9 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3920             :         union spdk_nvme_csts_register csts;
    3921             : 
    3922           9 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3923             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3924             :                  * temporarily. Allow for this case.
    3925             :                  */
    3926           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3927           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3928           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    3929             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3930             :                 } else {
    3931           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3932           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3933             :                 }
    3934             : 
    3935           0 :                 return;
    3936             :         }
    3937             : 
    3938           9 :         assert(value <= UINT32_MAX);
    3939           9 :         csts.raw = (uint32_t)value;
    3940           9 :         if (csts.bits.rdy == 1) {
    3941           9 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n");
    3942             :                 /*
    3943             :                  * The controller has been enabled.
    3944             :                  *  Perform the rest of initialization serially.
    3945             :                  */
    3946           9 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
    3947           9 :                                      ctrlr->opts.admin_timeout_ms);
    3948             :         } else {
    3949           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    3950             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3951             :         }
    3952             : }
    3953             : 
    3954             : /**
    3955             :  * This function will be called repeatedly during initialization until the controller is ready.
    3956             :  */
    3957             : int
    3958         446 : nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
    3959             : {
    3960             :         uint32_t ready_timeout_in_ms;
    3961             :         uint64_t ticks;
    3962         446 :         int rc = 0;
    3963             : 
    3964         446 :         ticks = spdk_get_ticks();
    3965             : 
    3966             :         /*
    3967             :          * We may need to avoid accessing any register on the target controller
    3968             :          * for a while. Return early without touching the FSM.
    3969             :          * The sleep_timeout_tsc > 0 check is needed by the unit tests.
    3970             :          */
    3971         446 :         if ((ctrlr->sleep_timeout_tsc > 0) &&
    3972           2 :             (ticks <= ctrlr->sleep_timeout_tsc)) {
    3973           1 :                 return 0;
    3974             :         }
    3975         445 :         ctrlr->sleep_timeout_tsc = 0;
    3976             : 
    3977         445 :         ready_timeout_in_ms = nvme_ctrlr_get_ready_timeout(ctrlr);
    3978             : 
    3979             :         /*
    3980             :          * Check if the current initialization step is done or has timed out.
    3981             :          */
    3982         445 :         switch (ctrlr->state) {
    3983           1 :         case NVME_CTRLR_STATE_INIT_DELAY:
    3984           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, ready_timeout_in_ms);
    3985           1 :                 if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_INIT) {
    3986             :                         /*
    3987             :                          * Controller may need some delay before it's enabled.
    3988             :                          *
    3989             :                          * This is a workaround for an issue where the PCIe-attached NVMe controller
    3990             :                          * is not ready after VFIO reset. We delay the initialization rather than the
    3991             :                          * enabling itself, because this is required only for the very first enabling
    3992             :                          * - directly after a VFIO reset.
    3993             :                          */
    3994           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Adding 2 second delay before initializing the controller\n");
    3995           1 :                         ctrlr->sleep_timeout_tsc = ticks + (2000 * spdk_get_ticks_hz() / 1000);
    3996             :                 }
    3997           1 :                 break;
    3998             : 
    3999           0 :         case NVME_CTRLR_STATE_DISCONNECTED:
    4000           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    4001           0 :                 break;
    4002             : 
    4003          21 :         case NVME_CTRLR_STATE_CONNECT_ADMINQ: /* synonymous with NVME_CTRLR_STATE_INIT and NVME_CTRLR_STATE_DISCONNECTED */
    4004          21 :                 rc = nvme_transport_ctrlr_connect_qpair(ctrlr, ctrlr->adminq);
    4005          21 :                 if (rc == 0) {
    4006          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ,
    4007             :                                              NVME_TIMEOUT_INFINITE);
    4008             :                 } else {
    4009           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    4010             :                 }
    4011          21 :                 break;
    4012             : 
    4013          21 :         case NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ:
    4014          21 :                 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4015             : 
    4016          21 :                 switch (nvme_qpair_get_state(ctrlr->adminq)) {
    4017           0 :                 case NVME_QPAIR_CONNECTING:
    4018           0 :                         if (ctrlr->is_failed) {
    4019           0 :                                 nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    4020             :                         }
    4021           0 :                         break;
    4024          21 :                 case NVME_QPAIR_CONNECTED:
    4025          21 :                         nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_ENABLED);
    4026             :                 /* Fall through */
    4027          21 :                 case NVME_QPAIR_ENABLED:
    4028          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_VS,
    4029             :                                              NVME_TIMEOUT_INFINITE);
    4030             :                         /* Abort any queued requests that were sent while the adminq was connecting
    4031             :                          * to avoid stalling the init process during a reset, as requests don't get
    4032             :                          * resubmitted while the controller is resetting and subsequent commands
    4033             :                          * would get queued too.
    4034             :                          */
    4035          21 :                         nvme_qpair_abort_queued_reqs(ctrlr->adminq);
    4036          21 :                         break;
    4037           0 :                 case NVME_QPAIR_DISCONNECTING:
    4038           0 :                         assert(ctrlr->adminq->async == true);
    4039           0 :                         break;
    4040           0 :                 case NVME_QPAIR_DISCONNECTED:
    4041             :                 /* fallthrough */
    4042             :                 default:
    4043           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    4044           0 :                         break;
    4045             :                 }
    4046             : 
    4047          21 :                 break;
    4048             : 
    4049          21 :         case NVME_CTRLR_STATE_READ_VS:
    4050          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS, NVME_TIMEOUT_INFINITE);
    4051          21 :                 rc = nvme_ctrlr_get_vs_async(ctrlr, nvme_ctrlr_process_init_vs_done, ctrlr);
    4052          21 :                 break;
    4053             : 
    4054          21 :         case NVME_CTRLR_STATE_READ_CAP:
    4055          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP, NVME_TIMEOUT_INFINITE);
    4056          21 :                 rc = nvme_ctrlr_get_cap_async(ctrlr, nvme_ctrlr_process_init_cap_done, ctrlr);
    4057          21 :                 break;
    4058             : 
    4059          22 :         case NVME_CTRLR_STATE_CHECK_EN:
    4060             :                 /* Begin the hardware initialization by making sure the controller is disabled. */
    4061          22 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC, ready_timeout_in_ms);
    4062          22 :                 rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_process_init_check_en, ctrlr);
    4063          22 :                 break;
    4064             : 
    4065           2 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
    4066             :                 /*
    4067             :                  * Controller is currently enabled. We need to disable it to cause a reset.
    4068             :                  *
    4069             :                  * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
    4070             :                  *  Wait for the ready bit to be 1 before disabling the controller.
    4071             :                  */
    4072           2 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
    4073             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    4074           2 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_wait_for_ready_1, ctrlr);
    4075           2 :                 break;
    4076             : 
    4077           2 :         case NVME_CTRLR_STATE_SET_EN_0:
    4078           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Setting CC.EN = 0\n");
    4079           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC, ready_timeout_in_ms);
    4080           2 :                 rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_process_init_set_en_0_read_cc, ctrlr);
    4081           2 :                 break;
    4082             : 
    4083          22 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
    4084          22 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS,
    4085             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    4086          22 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_wait_for_ready_0, ctrlr);
    4087          22 :                 break;
    4088             : 
    4089          21 :         case NVME_CTRLR_STATE_DISABLED:
    4090          21 :                 if (ctrlr->is_disconnecting) {
    4091           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Ctrlr was disabled.\n");
    4092             :                 } else {
    4093          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms);
    4094             : 
    4095             :                         /*
    4096             :                          * Delay 100us before setting CC.EN = 1.  Some NVMe SSDs miss CC.EN getting
    4097             :                          *  set to 1 if it is too soon after CSTS.RDY is reported as 0.
    4098             :                          */
    4099          21 :                         spdk_delay_us(100);
    4100             :                 }
    4101          21 :                 break;
    4102             : 
    4103          21 :         case NVME_CTRLR_STATE_ENABLE:
    4104          21 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Setting CC.EN = 1\n");
    4105          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC, ready_timeout_in_ms);
    4106          21 :                 rc = nvme_ctrlr_enable(ctrlr);
    4107          21 :                 if (rc) {
    4108           7 :                         NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr enable failed with error: %d\n", rc);
    4109             :                 }
    4110          21 :                 return rc;
    4111             : 
    4112           9 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
    4113           9 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
    4114             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    4115           9 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_enable_wait_for_ready_1,
    4116             :                                                ctrlr);
    4117           9 :                 break;
    4118             : 
    4119           9 :         case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
    4120           9 :                 nvme_transport_qpair_reset(ctrlr->adminq);
    4121           9 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY, NVME_TIMEOUT_INFINITE);
    4122           9 :                 break;
    4123             : 
    4124          16 :         case NVME_CTRLR_STATE_IDENTIFY:
    4125          16 :                 rc = nvme_ctrlr_identify(ctrlr);
    4126          16 :                 break;
    4127             : 
    4128          19 :         case NVME_CTRLR_STATE_CONFIGURE_AER:
    4129          19 :                 rc = nvme_ctrlr_configure_aer(ctrlr);
    4130          19 :                 break;
    4131             : 
    4132          22 :         case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
    4133          22 :                 rc = nvme_ctrlr_set_keep_alive_timeout(ctrlr);
    4134          22 :                 break;
    4135             : 
    4136          19 :         case NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC:
    4137          19 :                 rc = nvme_ctrlr_identify_iocs_specific(ctrlr);
    4138          19 :                 break;
    4139             : 
    4140           0 :         case NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG:
    4141           0 :                 rc = nvme_ctrlr_get_zns_cmd_and_effects_log(ctrlr);
    4142           0 :                 break;
    4143             : 
    4144          19 :         case NVME_CTRLR_STATE_SET_NUM_QUEUES:
    4145          19 :                 nvme_ctrlr_update_nvmf_ioccsz(ctrlr);
    4146          19 :                 rc = nvme_ctrlr_set_num_queues(ctrlr);
    4147          19 :                 break;
    4148             : 
    4149          24 :         case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
    4150          24 :                 _nvme_ctrlr_identify_active_ns(ctrlr);
    4151          24 :                 break;
    4152             : 
    4153          14 :         case NVME_CTRLR_STATE_IDENTIFY_NS:
    4154          14 :                 rc = nvme_ctrlr_identify_namespaces(ctrlr);
    4155          14 :                 break;
    4156             : 
    4157          14 :         case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
    4158          14 :                 rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
    4159          14 :                 break;
    4160             : 
    4161          14 :         case NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC:
    4162          14 :                 rc = nvme_ctrlr_identify_namespaces_iocs_specific(ctrlr);
    4163          14 :                 break;
    4164             : 
    4165          15 :         case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
    4166          15 :                 rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
    4167          15 :                 break;
    4168             : 
    4169           1 :         case NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES:
    4170           1 :                 rc = nvme_ctrlr_set_intel_support_log_pages(ctrlr);
    4171           1 :                 break;
    4172             : 
    4173          14 :         case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
    4174          14 :                 nvme_ctrlr_set_supported_features(ctrlr);
    4175          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_FEATURE,
    4176          14 :                                      ctrlr->opts.admin_timeout_ms);
    4177          14 :                 break;
    4178             : 
    4179          16 :         case NVME_CTRLR_STATE_SET_HOST_FEATURE:
    4180          16 :                 rc = nvme_ctrlr_set_host_feature(ctrlr);
    4181          16 :                 break;
    4182             : 
    4183          14 :         case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
    4184          14 :                 rc = nvme_ctrlr_set_doorbell_buffer_config(ctrlr);
    4185          14 :                 break;
    4186             : 
    4187          14 :         case NVME_CTRLR_STATE_SET_HOST_ID:
    4188          14 :                 rc = nvme_ctrlr_set_host_id(ctrlr);
    4189          14 :                 break;
    4190             : 
    4191          17 :         case NVME_CTRLR_STATE_TRANSPORT_READY:
    4192          17 :                 rc = nvme_transport_ctrlr_ready(ctrlr);
    4193          17 :                 if (rc) {
    4194           1 :                         NVME_CTRLR_ERRLOG(ctrlr, "Transport controller ready step failed: rc %d\n", rc);
    4195           1 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    4196             :                 } else {
    4197          16 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    4198             :                 }
    4199          17 :                 break;
    4200             : 
    4201           0 :         case NVME_CTRLR_STATE_READY:
    4202           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Ctrlr already in ready state\n");
    4203           0 :                 return 0;
    4204             : 
    4205           0 :         case NVME_CTRLR_STATE_ERROR:
    4206           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr is in error state\n");
    4207           0 :                 return -1;
    4208             : 
    4209           0 :         case NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS:
    4210             :         case NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP:
    4211             :         case NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC:
    4212             :         case NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC:
    4213             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    4214             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS:
    4215             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC:
    4216             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    4217             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
    4218             :         case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
    4219             :         case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
    4220             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC:
    4221             :         case NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG:
    4222             :         case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
    4223             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
    4224             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
    4225             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
    4226             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC:
    4227             :         case NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES:
    4228             :         case NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE:
    4229             :         case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
    4230             :         case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
    4231             :                 /*
    4232             :                  * nvme_ctrlr_process_init() may be called from the completion context
    4233             :                  * for the admin qpair. Avoid recursive calls for this case.
    4234             :                  */
    4235           0 :                 if (!ctrlr->adminq->in_completion_context) {
    4236           0 :                         spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4237             :                 }
    4238           0 :                 break;
    4239             : 
    4240           0 :         default:
    4241           0 :                 assert(0);
    4242             :                 return -1;
    4243             :         }
    4244             : 
    4245         424 :         if (rc) {
    4246           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr operation failed with error: %d, ctrlr state: %d (%s)\n",
    4247             :                                   rc, ctrlr->state, nvme_ctrlr_state_string(ctrlr->state));
    4248             :         }
    4249             : 
    4250             :         /* Note: we use the ticks captured when we entered this function.
    4251             :          * This covers environments where the SPDK process gets swapped out after
    4252             :          * we tried to advance the state but before we check the timeout here.
    4253             :          * It is not normal for this to happen, but harmless to handle it in this
    4254             :          * way.
    4255             :          */
    4256         424 :         if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
    4257           0 :             ticks > ctrlr->state_timeout_tsc) {
    4258           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Initialization timed out in state %d (%s)\n",
    4259             :                                   ctrlr->state, nvme_ctrlr_state_string(ctrlr->state));
    4260           0 :                 return -1;
    4261             :         }
    4262             : 
    4263         424 :         return rc;
    4264             : }
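                     : 
                     : /*
                     :  * Minimal usage sketch (illustrative, not the actual attach path): the state
                     :  * machine above is driven by calling nvme_ctrlr_process_init() repeatedly
                     :  * until the controller reaches NVME_CTRLR_STATE_READY or an error occurs.
                     :  * The loop and its name are hypothetical.
                     :  */
                     : static int
                     : example_drive_ctrlr_init(struct spdk_nvme_ctrlr *ctrlr)
                     : {
                     :         int rc;
                     : 
                     :         while (ctrlr->state != NVME_CTRLR_STATE_READY) {
                     :                 rc = nvme_ctrlr_process_init(ctrlr);
                     :                 if (rc != 0) {
                     :                         return rc;      /* initialization failed or timed out */
                     :                 }
                     :         }
                     : 
                     :         return 0;
                     : }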
    4265             : 
    4266             : int
    4267          47 : nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx)
    4268             : {
    4269          47 :         pthread_mutexattr_t attr;
    4270          47 :         int rc = 0;
    4271             : 
    4272          47 :         if (pthread_mutexattr_init(&attr)) {
    4273           0 :                 return -1;
    4274             :         }
    4275          94 :         if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
    4276             : #ifndef __FreeBSD__
    4277          94 :             pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
    4278          94 :             pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
    4279             : #endif
    4280          47 :             pthread_mutex_init(mtx, &attr)) {
    4281           0 :                 rc = -1;
    4282             :         }
    4283          47 :         pthread_mutexattr_destroy(&attr);
    4284          47 :         return rc;
    4285             : }
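                     : 
                     : /*
                     :  * Usage sketch (illustrative): the mutex initialized by
                     :  * nvme_robust_mutex_init_recursive_shared() is recursive, so the same thread
                     :  * may take it more than once, and (except on FreeBSD) it is robust and
                     :  * process-shared.  The function below is hypothetical.
                     :  */
                     : static int
                     : example_recursive_mutex_usage(pthread_mutex_t *mtx)
                     : {
                     :         if (nvme_robust_mutex_init_recursive_shared(mtx) != 0) {
                     :                 return -1;
                     :         }
                     : 
                     :         pthread_mutex_lock(mtx);
                     :         pthread_mutex_lock(mtx);        /* recursive re-lock by the same thread */
                     :         pthread_mutex_unlock(mtx);
                     :         pthread_mutex_unlock(mtx);
                     : 
                     :         return pthread_mutex_destroy(mtx);
                     : }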
    4286             : 
    4287             : int
    4288          47 : nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
    4289             : {
    4290             :         int rc;
    4291             : 
    4292          47 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    4293           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT_DELAY, NVME_TIMEOUT_INFINITE);
    4294             :         } else {
    4295          46 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    4296             :         }
    4297             : 
    4298          47 :         if (ctrlr->opts.admin_queue_size > SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES) {
    4299           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "admin_queue_size %u exceeds max defined by NVMe spec, use max value\n",
    4300             :                                   ctrlr->opts.admin_queue_size);
    4301           0 :                 ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES;
    4302             :         }
    4303             : 
    4304          47 :         if (ctrlr->quirks & NVME_QUIRK_MINIMUM_ADMIN_QUEUE_SIZE &&
    4305           0 :             (ctrlr->opts.admin_queue_size % SPDK_NVME_ADMIN_QUEUE_QUIRK_ENTRIES_MULTIPLE) != 0) {
    4306           0 :                 NVME_CTRLR_ERRLOG(ctrlr,
    4307             :                                   "admin_queue_size %u is invalid for this NVMe device, adjust to next multiple\n",
    4308             :                                   ctrlr->opts.admin_queue_size);
    4309           0 :                 ctrlr->opts.admin_queue_size = SPDK_ALIGN_CEIL(ctrlr->opts.admin_queue_size,
    4310             :                                                SPDK_NVME_ADMIN_QUEUE_QUIRK_ENTRIES_MULTIPLE);
    4311             :         }
    4312             : 
    4313          47 :         if (ctrlr->opts.admin_queue_size < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES) {
    4314          26 :                 NVME_CTRLR_ERRLOG(ctrlr,
    4315             :                                   "admin_queue_size %u is less than minimum defined by NVMe spec, use min value\n",
    4316             :                                   ctrlr->opts.admin_queue_size);
    4317          26 :                 ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES;
    4318             :         }
    4319             : 
    4320          47 :         ctrlr->flags = 0;
    4321          47 :         ctrlr->free_io_qids = NULL;
    4322          47 :         ctrlr->is_resetting = false;
    4323          47 :         ctrlr->is_failed = false;
    4324          47 :         ctrlr->is_destructed = false;
    4325             : 
    4326          47 :         TAILQ_INIT(&ctrlr->active_io_qpairs);
    4327          47 :         STAILQ_INIT(&ctrlr->queued_aborts);
    4328          47 :         ctrlr->outstanding_aborts = 0;
    4329             : 
    4330          47 :         ctrlr->ana_log_page = NULL;
    4331          47 :         ctrlr->ana_log_page_size = 0;
    4332             : 
    4333          47 :         rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
    4334          47 :         if (rc != 0) {
    4335           0 :                 return rc;
    4336             :         }
    4337             : 
    4338          47 :         TAILQ_INIT(&ctrlr->active_procs);
    4339          47 :         STAILQ_INIT(&ctrlr->register_operations);
    4340             : 
    4341          47 :         RB_INIT(&ctrlr->ns);
    4342             : 
    4343          47 :         return rc;
    4344             : }
    4345             : 
    4346             : static void
    4347          21 : nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr)
    4348             : {
    4349          21 :         if (ctrlr->cap.bits.ams & SPDK_NVME_CAP_AMS_WRR) {
    4350           5 :                 ctrlr->flags |= SPDK_NVME_CTRLR_WRR_SUPPORTED;
    4351             :         }
    4352             : 
    4353          21 :         ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
    4354             : 
    4355             :         /* For now, always select page_size == min_page_size. */
    4356          21 :         ctrlr->page_size = ctrlr->min_page_size;
    4357             : 
    4358          21 :         ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
    4359          21 :         ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
    4360          21 :         if (ctrlr->quirks & NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE &&
    4361           0 :             ctrlr->opts.io_queue_size == DEFAULT_IO_QUEUE_SIZE) {
    4362             :                 /* If the user specifically set an IO queue size different than the
    4363             :                  * default, use that value.  Otherwise overwrite with the quirked value.
    4364             :                  * This allows this quirk to be overridden when necessary.
    4365             :                  * However, cap.mqes still needs to be respected.
    4366             :                  */
    4367           0 :                 ctrlr->opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK;
    4368             :         }
    4369          21 :         ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
    4370             : 
    4371          21 :         ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
    4372          21 : }
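                     : 
                     : /*
                     :  * Worked example (illustrative): CAP.MPSMIN encodes the minimum memory page
                     :  * size as a power of two starting at 4 KiB, which is why min_page_size is
                     :  * computed as 1u << (12 + mpsmin) above: mpsmin == 0 gives 4096, mpsmin == 2
                     :  * gives 16384.  The helper name is hypothetical.
                     :  */
                     : static inline uint32_t
                     : example_min_page_size(uint8_t mpsmin)
                     : {
                     :         return 1u << (12 + mpsmin);
                     : }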
    4373             : 
    4374             : void
    4375          47 : nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
    4376             : {
    4377             :         int rc;
    4378             : 
    4379          47 :         if (ctrlr->lock_depth > 0) {
    4380           0 :                 SPDK_ERRLOG("lock currently held (depth=%d)!\n", ctrlr->lock_depth);
    4381           0 :                 assert(false);
    4382             :         }
    4383             : 
    4384          47 :         rc = pthread_mutex_destroy(&ctrlr->ctrlr_lock);
    4385          47 :         if (rc) {
    4386           0 :                 SPDK_ERRLOG("could not destroy ctrlr_lock: %s\n", spdk_strerror(rc));
    4387           0 :                 assert(false);
    4388             :         }
    4389             : 
    4390          47 :         nvme_ctrlr_free_processes(ctrlr);
    4391          47 : }
    4392             : 
    4393             : void
    4394          47 : nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
    4395             :                           struct nvme_ctrlr_detach_ctx *ctx)
    4396             : {
    4397             :         struct spdk_nvme_qpair *qpair, *tmp;
    4398             : 
    4399          47 :         NVME_CTRLR_DEBUGLOG(ctrlr, "Prepare to destruct SSD\n");
    4400             : 
    4401          47 :         ctrlr->prepare_for_reset = false;
    4402          47 :         ctrlr->is_destructed = true;
    4403             : 
    4404          47 :         spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4405             : 
    4406          47 :         nvme_ctrlr_abort_queued_aborts(ctrlr);
    4407          47 :         nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
    4408             : 
    4409          47 :         TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
    4410           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    4411             :         }
    4412             : 
    4413          47 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    4414          47 :         nvme_ctrlr_free_iocs_specific_data(ctrlr);
    4415             : 
    4416          47 :         nvme_ctrlr_shutdown_async(ctrlr, ctx);
    4417          47 : }
    4418             : 
    4419             : int
    4420          86 : nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
    4421             :                                struct nvme_ctrlr_detach_ctx *ctx)
    4422             : {
    4423             :         struct spdk_nvme_ns *ns, *tmp_ns;
    4424          86 :         int rc = 0;
    4425             : 
    4426          86 :         if (!ctx->shutdown_complete) {
    4427          78 :                 rc = nvme_ctrlr_shutdown_poll_async(ctrlr, ctx);
    4428          78 :                 if (rc == -EAGAIN) {
    4429          39 :                         return -EAGAIN;
    4430             :                 }
    4431             :                 /* Destruct ctrlr forcefully for any other error. */
    4432             :         }
    4433             : 
    4434          47 :         if (ctx->cb_fn) {
    4435           0 :                 ctx->cb_fn(ctrlr);
    4436             :         }
    4437             : 
    4438          47 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    4439             : 
    4440        7733 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    4441        7686 :                 nvme_ctrlr_destruct_namespace(ctrlr, ns->id);
    4442        7686 :                 RB_REMOVE(nvme_ns_tree, &ctrlr->ns, ns);
    4443        7686 :                 spdk_free(ns);
    4444             :         }
    4445             : 
    4446          47 :         ctrlr->active_ns_count = 0;
    4447             : 
    4448          47 :         spdk_bit_array_free(&ctrlr->free_io_qids);
    4449             : 
    4450          47 :         free(ctrlr->ana_log_page);
    4451          47 :         free(ctrlr->copied_ana_desc);
    4452          47 :         ctrlr->ana_log_page = NULL;
    4453          47 :         ctrlr->copied_ana_desc = NULL;
    4454          47 :         ctrlr->ana_log_page_size = 0;
    4455             : 
    4456          47 :         nvme_transport_ctrlr_destruct(ctrlr);
    4457             : 
    4458          47 :         return rc;
    4459             : }
    4460             : 
    4461             : void
    4462          47 : nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
    4463             : {
    4464          47 :         struct nvme_ctrlr_detach_ctx ctx = { .ctrlr = ctrlr };
    4465             :         int rc;
    4466             : 
    4467          47 :         nvme_ctrlr_destruct_async(ctrlr, &ctx);
    4468             : 
    4469             :         while (1) {
    4470          86 :                 rc = nvme_ctrlr_destruct_poll_async(ctrlr, &ctx);
    4471          86 :                 if (rc != -EAGAIN) {
    4472          47 :                         break;
    4473             :                 }
    4474          39 :                 nvme_delay(1000);
    4475             :         }
    4476          47 : }
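                     : 
                     : /*
                     :  * Illustrative note: callers that must not block can drive the same pair of
                     :  * calls themselves instead of using nvme_ctrlr_destruct(): invoke
                     :  * nvme_ctrlr_destruct_async() once, then call nvme_ctrlr_destruct_poll_async()
                     :  * (e.g. from a poller) until it stops returning -EAGAIN.
                     :  */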
    4477             : 
    4478             : int
    4479          24 : nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
    4480             :                                 struct nvme_request *req)
    4481             : {
    4482          24 :         return nvme_qpair_submit_request(ctrlr->adminq, req);
    4483             : }
    4484             : 
    4485             : static void
    4486           0 : nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
    4487             : {
    4488             :         /* Do nothing */
    4489           0 : }
    4490             : 
    4491             : /*
    4492             :  * Check if we need to send a Keep Alive command.
    4493             :  * Caller must hold ctrlr->ctrlr_lock.
    4494             :  */
    4495             : static int
    4496           0 : nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
    4497             : {
    4498             :         uint64_t now;
    4499             :         struct nvme_request *req;
    4500             :         struct spdk_nvme_cmd *cmd;
    4501           0 :         int rc = 0;
    4502             : 
    4503           0 :         now = spdk_get_ticks();
    4504           0 :         if (now < ctrlr->next_keep_alive_tick) {
    4505           0 :                 return rc;
    4506             :         }
    4507             : 
    4508           0 :         req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
    4509           0 :         if (req == NULL) {
    4510           0 :                 return rc;
    4511             :         }
    4512             : 
    4513           0 :         cmd = &req->cmd;
    4514           0 :         cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;
    4515             : 
    4516           0 :         rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
    4517           0 :         if (rc != 0) {
    4518           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Submitting Keep Alive failed\n");
    4519           0 :                 rc = -ENXIO;
    4520             :         }
    4521             : 
    4522           0 :         ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
    4523           0 :         return rc;
    4524             : }
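                     : 
                     : /*
                     :  * Illustrative sketch: how a keep-alive timeout in milliseconds could map to
                     :  * the keep_alive_interval_ticks pacing used above.  Sending at half the
                     :  * negotiated timeout is one reasonable policy; the helper and that choice are
                     :  * assumptions of this sketch, not necessarily what the driver configures.
                     :  */
                     : static inline uint64_t
                     : example_keep_alive_interval_ticks(uint32_t keep_alive_timeout_ms)
                     : {
                     :         return (keep_alive_timeout_ms / 2) * spdk_get_ticks_hz() / 1000ULL;
                     : }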
    4525             : 
    4526             : int32_t
    4527           1 : spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
    4528             : {
    4529             :         int32_t num_completions;
    4530             :         int32_t rc;
    4531             :         struct spdk_nvme_ctrlr_process  *active_proc;
    4532             : 
    4533           1 :         nvme_ctrlr_lock(ctrlr);
    4534             : 
    4535           1 :         if (ctrlr->keep_alive_interval_ticks) {
    4536           0 :                 rc = nvme_ctrlr_keep_alive(ctrlr);
    4537           0 :                 if (rc) {
    4538           0 :                         nvme_ctrlr_unlock(ctrlr);
    4539           0 :                         return rc;
    4540             :                 }
    4541             :         }
    4542             : 
    4543           1 :         rc = nvme_io_msg_process(ctrlr);
    4544           1 :         if (rc < 0) {
    4545           0 :                 nvme_ctrlr_unlock(ctrlr);
    4546           0 :                 return rc;
    4547             :         }
    4548           1 :         num_completions = rc;
    4549             : 
    4550           1 :         rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4551             : 
    4552             :         /* Each process has an async list, complete the ones for this process object */
    4553           1 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4554           1 :         if (active_proc) {
    4555           0 :                 nvme_ctrlr_complete_queued_async_events(ctrlr);
    4556             :         }
    4557             : 
    4558           1 :         if (rc == -ENXIO && ctrlr->is_disconnecting) {
    4559           1 :                 nvme_ctrlr_disconnect_done(ctrlr);
    4560             :         }
    4561             : 
    4562           1 :         nvme_ctrlr_unlock(ctrlr);
    4563             : 
    4564           1 :         if (rc < 0) {
    4565           1 :                 num_completions = rc;
    4566             :         } else {
    4567           0 :                 num_completions += rc;
    4568             :         }
    4569             : 
    4570           1 :         return num_completions;
    4571             : }
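                     : 
                     : /*
                     :  * Usage sketch (illustrative): applications typically call
                     :  * spdk_nvme_ctrlr_process_admin_completions() periodically so keep-alives,
                     :  * asynchronous events, and queued admin completions are serviced.  The
                     :  * poller function below is hypothetical.
                     :  */
                     : static int
                     : example_admin_poller(void *arg)
                     : {
                     :         struct spdk_nvme_ctrlr *ctrlr = arg;
                     :         int32_t rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr);
                     : 
                     :         return rc < 0 ? rc : 0;
                     : }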
    4572             : 
    4573             : const struct spdk_nvme_ctrlr_data *
    4574           0 : spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
    4575             : {
    4576           0 :         return &ctrlr->cdata;
    4577             : }
    4578             : 
    4579           0 : union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
    4580             : {
    4581           0 :         union spdk_nvme_csts_register csts;
    4582             : 
    4583           0 :         if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
    4584           0 :                 csts.raw = SPDK_NVME_INVALID_REGISTER_VALUE;
    4585             :         }
    4586           0 :         return csts;
    4587             : }
    4588             : 
    4589           0 : union spdk_nvme_cc_register spdk_nvme_ctrlr_get_regs_cc(struct spdk_nvme_ctrlr *ctrlr)
    4590             : {
    4591           0 :         union spdk_nvme_cc_register cc;
    4592             : 
    4593           0 :         if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
    4594           0 :                 cc.raw = SPDK_NVME_INVALID_REGISTER_VALUE;
    4595             :         }
    4596           0 :         return cc;
    4597             : }
    4598             : 
    4599           0 : union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
    4600             : {
    4601           0 :         return ctrlr->cap;
    4602             : }
    4603             : 
    4604           0 : union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
    4605             : {
    4606           0 :         return ctrlr->vs;
    4607             : }
    4608             : 
    4609           0 : union spdk_nvme_cmbsz_register spdk_nvme_ctrlr_get_regs_cmbsz(struct spdk_nvme_ctrlr *ctrlr)
    4610             : {
    4611           0 :         union spdk_nvme_cmbsz_register cmbsz;
    4612             : 
    4613           0 :         if (nvme_ctrlr_get_cmbsz(ctrlr, &cmbsz)) {
    4614           0 :                 cmbsz.raw = 0;
    4615             :         }
    4616             : 
    4617           0 :         return cmbsz;
    4618             : }
    4619             : 
    4620           0 : union spdk_nvme_pmrcap_register spdk_nvme_ctrlr_get_regs_pmrcap(struct spdk_nvme_ctrlr *ctrlr)
    4621             : {
    4622           0 :         union spdk_nvme_pmrcap_register pmrcap;
    4623             : 
    4624           0 :         if (nvme_ctrlr_get_pmrcap(ctrlr, &pmrcap)) {
    4625           0 :                 pmrcap.raw = 0;
    4626             :         }
    4627             : 
    4628           0 :         return pmrcap;
    4629             : }
    4630             : 
    4631           0 : union spdk_nvme_bpinfo_register spdk_nvme_ctrlr_get_regs_bpinfo(struct spdk_nvme_ctrlr *ctrlr)
    4632             : {
    4633           0 :         union spdk_nvme_bpinfo_register bpinfo;
    4634             : 
    4635           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    4636           0 :                 bpinfo.raw = 0;
    4637             :         }
    4638             : 
    4639           0 :         return bpinfo;
    4640             : }
    4641             : 
    4642             : uint64_t
    4643           0 : spdk_nvme_ctrlr_get_pmrsz(struct spdk_nvme_ctrlr *ctrlr)
    4644             : {
    4645           0 :         return ctrlr->pmr_size;
    4646             : }
    4647             : 
    4648             : uint32_t
    4649           2 : spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
    4650             : {
    4651           2 :         return ctrlr->cdata.nn;
    4652             : }
    4653             : 
    4654             : bool
    4655        9301 : spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4656             : {
    4657        9301 :         struct spdk_nvme_ns tmp, *ns;
    4658             : 
    4659        9301 :         tmp.id = nsid;
    4660        9301 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4661             : 
    4662        9301 :         if (ns != NULL) {
    4663        9209 :                 return ns->active;
    4664             :         }
    4665             : 
    4666          92 :         return false;
    4667             : }
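                     : 
                     : /*
                     :  * Illustrative note: the lookup above uses a stack-allocated spdk_nvme_ns
                     :  * purely as a search key; nvme_ns_cmp() only reads the id field, so setting
                     :  * id alone is sufficient for RB_FIND().
                     :  */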
    4668             : 
    4669             : uint32_t
    4670          35 : spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    4671             : {
    4672             :         struct spdk_nvme_ns *ns;
    4673             : 
    4674          35 :         ns = RB_MIN(nvme_ns_tree, &ctrlr->ns);
    4675          35 :         if (ns == NULL) {
    4676          10 :                 return 0;
    4677             :         }
    4678             : 
    4679        4618 :         while (ns != NULL) {
    4680        4615 :                 if (ns->active) {
    4681          22 :                         return ns->id;
    4682             :                 }
    4683             : 
    4684        4593 :                 ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4685             :         }
    4686             : 
    4687           3 :         return 0;
    4688             : }
    4689             : 
    4690             : uint32_t
    4691        4657 : spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
    4692             : {
    4693        4657 :         struct spdk_nvme_ns tmp, *ns;
    4694             : 
    4695        4657 :         tmp.id = prev_nsid;
    4696        4657 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4697        4657 :         if (ns == NULL) {
    4698           5 :                 return 0;
    4699             :         }
    4700             : 
    4701        4652 :         ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4702        6184 :         while (ns != NULL) {
    4703        6164 :                 if (ns->active) {
    4704        4632 :                         return ns->id;
    4705             :                 }
    4706             : 
    4707        1532 :                 ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4708             :         }
    4709             : 
    4710          20 :         return 0;
    4711             : }
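                     : 
                     : /*
                     :  * Usage sketch (illustrative): the first/next pair above is the intended way
                     :  * to iterate every active namespace on a controller.
                     :  */
                     : static void
                     : example_for_each_active_ns(struct spdk_nvme_ctrlr *ctrlr)
                     : {
                     :         uint32_t nsid;
                     : 
                     :         for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
                     :              nsid != 0;
                     :              nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
                     :                 struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
                     : 
                     :                 (void)ns;       /* ... use the namespace here ... */
                     :         }
                     : }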
    4712             : 
    4713             : struct spdk_nvme_ns *
    4714       12403 : spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4715             : {
    4716       12403 :         struct spdk_nvme_ns tmp;
    4717             :         struct spdk_nvme_ns *ns;
    4718             : 
    4719       12403 :         if (nsid < 1 || nsid > ctrlr->cdata.nn) {
    4720          18 :                 return NULL;
    4721             :         }
    4722             : 
    4723       12385 :         nvme_ctrlr_lock(ctrlr);
    4724             : 
    4725       12385 :         tmp.id = nsid;
    4726       12385 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4727             : 
    4728       12385 :         if (ns == NULL) {
    4729        7687 :                 ns = spdk_zmalloc(sizeof(struct spdk_nvme_ns), 64, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
    4730        7687 :                 if (ns == NULL) {
    4731           0 :                         nvme_ctrlr_unlock(ctrlr);
    4732           0 :                         return NULL;
    4733             :                 }
    4734             : 
    4735        7687 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Namespace %u was added\n", nsid);
    4736        7687 :                 ns->id = nsid;
    4737        7687 :                 RB_INSERT(nvme_ns_tree, &ctrlr->ns, ns);
    4738             :         }
    4739             : 
    4740       12385 :         nvme_ctrlr_unlock(ctrlr);
    4741             : 
    4742       12385 :         return ns;
    4743             : }
    4744             : 
    4745             : struct spdk_pci_device *
    4746           0 : spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr)
    4747             : {
    4748           0 :         if (ctrlr == NULL) {
    4749           0 :                 return NULL;
    4750             :         }
    4751             : 
    4752           0 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    4753           0 :                 return NULL;
    4754             :         }
    4755             : 
    4756           0 :         return nvme_ctrlr_proc_get_devhandle(ctrlr);
    4757             : }
    4758             : 
    4759             : int32_t
    4760           3 : spdk_nvme_ctrlr_get_numa_id(struct spdk_nvme_ctrlr *ctrlr)
    4761             : {
    4762           3 :         if (ctrlr->numa.id_valid) {
    4763           2 :                 return ctrlr->numa.id;
    4764             :         } else {
    4765           1 :                 return SPDK_ENV_NUMA_ID_ANY;
    4766             :         }
    4767             : }
    4768             : 
    4769             : uint16_t
    4770           0 : spdk_nvme_ctrlr_get_id(struct spdk_nvme_ctrlr *ctrlr)
    4771             : {
    4772           0 :         return ctrlr->cntlid;
    4773             : }
    4774             : 
    4775             : uint32_t
    4776           0 : spdk_nvme_ctrlr_get_max_xfer_size(const struct spdk_nvme_ctrlr *ctrlr)
    4777             : {
    4778           0 :         return ctrlr->max_xfer_size;
    4779             : }
    4780             : 
    4781             : uint16_t
    4782           0 : spdk_nvme_ctrlr_get_max_sges(const struct spdk_nvme_ctrlr *ctrlr)
    4783             : {
    4784           0 :         if (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
    4785           0 :                 return ctrlr->max_sges;
    4786             :         } else {
    4787           0 :                 return UINT16_MAX;
    4788             :         }
    4789             : }
    4790             : 
    4791             : void
    4792           2 : spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
    4793             :                                       spdk_nvme_aer_cb aer_cb_fn,
    4794             :                                       void *aer_cb_arg)
    4795             : {
    4796             :         struct spdk_nvme_ctrlr_process *active_proc;
    4797             : 
    4798           2 :         nvme_ctrlr_lock(ctrlr);
    4799             : 
    4800           2 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4801           2 :         if (active_proc) {
    4802           2 :                 active_proc->aer_cb_fn = aer_cb_fn;
    4803           2 :                 active_proc->aer_cb_arg = aer_cb_arg;
    4804             :         }
    4805             : 
    4806           2 :         nvme_ctrlr_unlock(ctrlr);
    4807           2 : }
    4808             : 
    4809             : void
    4810           0 : spdk_nvme_ctrlr_disable_read_changed_ns_list_log_page(struct spdk_nvme_ctrlr *ctrlr)
    4811             : {
    4812           0 :         ctrlr->opts.disable_read_changed_ns_list_log_page = true;
    4813           0 : }
    4814             : 
    4815             : void
    4816           0 : spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
    4817             :                 uint64_t timeout_io_us, uint64_t timeout_admin_us,
    4818             :                 spdk_nvme_timeout_cb cb_fn, void *cb_arg)
    4819             : {
    4820             :         struct spdk_nvme_ctrlr_process  *active_proc;
    4821             : 
    4822           0 :         nvme_ctrlr_lock(ctrlr);
    4823             : 
    4824           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4825           0 :         if (active_proc) {
    4826           0 :                 active_proc->timeout_io_ticks = timeout_io_us * spdk_get_ticks_hz() / 1000000ULL;
    4827           0 :                 active_proc->timeout_admin_ticks = timeout_admin_us * spdk_get_ticks_hz() / 1000000ULL;
    4828           0 :                 active_proc->timeout_cb_fn = cb_fn;
    4829           0 :                 active_proc->timeout_cb_arg = cb_arg;
    4830             :         }
    4831             : 
    4832           0 :         ctrlr->timeout_enabled = true;
    4833             : 
    4834           0 :         nvme_ctrlr_unlock(ctrlr);
    4835           0 : }
    4836             : 
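/* Sketch (illustrative): the callback receives the qpair (NULL for admin
 * commands) and the CID of the command that exceeded its deadline.  One
 * common policy, shown with hypothetical names, is to try an Abort first and
 * escalate to a controller reset if it cannot be submitted.
 */
#include "spdk/nvme.h"

static void
abort_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	/* completion of the Abort command itself; nothing further to do */
}

static void
my_timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
	      struct spdk_nvme_qpair *qpair, uint16_t cid)
{
	if (qpair != NULL &&
	    spdk_nvme_ctrlr_cmd_abort(ctrlr, qpair, cid, abort_done, NULL) == 0) {
		return;
	}

	/* admin command timed out, or the Abort could not be submitted */
	spdk_nvme_ctrlr_reset(ctrlr);
}
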
    4837             : bool
    4838           8 : spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
    4839             : {
    4840             :         /* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
    4841             :         SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
    4842           8 :         return ctrlr->log_page_supported[log_page];
    4843             : }
    4844             : 
    4845             : bool
    4846           4 : spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
    4847             : {
    4848             :         /* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
    4849             :         SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
    4850           4 :         return ctrlr->feature_supported[feature_code];
    4851             : }
    4852             : 
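/* Sketch (illustrative): both predicates are O(1) table lookups, so gating a
 * submission on them is essentially free; e.g. fetching SMART data only when
 * the health log page is advertised.  The helper name is hypothetical.
 */
#include "spdk/stdinc.h"
#include "spdk/nvme.h"

static int
request_health_log(struct spdk_nvme_ctrlr *ctrlr,
		   struct spdk_nvme_health_information_page *page,
		   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (!spdk_nvme_ctrlr_is_log_page_supported(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION)) {
		return -ENOTSUP;
	}

	return spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
						SPDK_NVME_GLOBAL_NS_TAG, page,
						sizeof(*page), 0, cb_fn, cb_arg);
}
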
    4853             : int
    4854           1 : spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    4855             :                           struct spdk_nvme_ctrlr_list *payload)
    4856             : {
    4857             :         struct nvme_completion_poll_status      *status;
    4858             :         struct spdk_nvme_ns                     *ns;
    4859             :         int                                     res;
    4860             : 
    4861           1 :         if (nsid == 0) {
    4862           0 :                 return -EINVAL;
    4863             :         }
    4864             : 
    4865           1 :         status = calloc(1, sizeof(*status));
    4866           1 :         if (!status) {
    4867           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4868           0 :                 return -ENOMEM;
    4869             :         }
    4870             : 
    4871           1 :         res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
    4872             :                                        nvme_completion_poll_cb, status);
    4873           1 :         if (res) {
    4874           0 :                 free(status);
    4875           0 :                 return res;
    4876             :         }
    4877           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4878           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_attach_ns failed!\n");
    4879           0 :                 if (!status->timed_out) {
    4880           0 :                         free(status);
    4881             :                 }
    4882           0 :                 return -ENXIO;
    4883             :         }
    4884           1 :         free(status);
    4885             : 
    4886           1 :         res = nvme_ctrlr_identify_active_ns(ctrlr);
    4887           1 :         if (res) {
    4888           0 :                 return res;
    4889             :         }
    4890             : 
    4891           1 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    4892           1 :         if (ns == NULL) {
    4893           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_get_ns failed!\n");
    4894           0 :                 return -ENXIO;
    4895             :         }
    4896             : 
    4897           1 :         return nvme_ns_construct(ns, nsid, ctrlr);
    4898             : }
    4899             : 
    4900             : int
    4901           1 : spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    4902             :                           struct spdk_nvme_ctrlr_list *payload)
    4903             : {
    4904             :         struct nvme_completion_poll_status      *status;
    4905             :         int                                     res;
    4906             : 
    4907           1 :         if (nsid == 0) {
    4908           0 :                 return -EINVAL;
    4909             :         }
    4910             : 
    4911           1 :         status = calloc(1, sizeof(*status));
    4912           1 :         if (!status) {
    4913           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4914           0 :                 return -ENOMEM;
    4915             :         }
    4916             : 
    4917           1 :         res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
    4918             :                                        nvme_completion_poll_cb, status);
    4919           1 :         if (res) {
    4920           0 :                 free(status);
    4921           0 :                 return res;
    4922             :         }
    4923           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4924           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_detach_ns failed!\n");
    4925           0 :                 if (!status->timed_out) {
    4926           0 :                         free(status);
    4927             :                 }
    4928           0 :                 return -ENXIO;
    4929             :         }
    4930           1 :         free(status);
    4931             : 
    4932           1 :         return nvme_ctrlr_identify_active_ns(ctrlr);
    4933             : }
    4934             : 
    4935             : uint32_t
    4936           1 : spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
    4937             : {
    4938             :         struct nvme_completion_poll_status      *status;
    4939             :         int                                     res;
    4940             :         uint32_t                                nsid;
    4941             : 
    4942           1 :         status = calloc(1, sizeof(*status));
    4943           1 :         if (!status) {
    4944           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4945           0 :                 return 0;
    4946             :         }
    4947             : 
    4948           1 :         res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, status);
    4949           1 :         if (res) {
    4950           0 :                 free(status);
    4951           0 :                 return 0;
    4952             :         }
    4953           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4954           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_create_ns failed!\n");
    4955           0 :                 if (!status->timed_out) {
    4956           0 :                         free(status);
    4957             :                 }
    4958           0 :                 return 0;
    4959             :         }
    4960             : 
    4961           1 :         nsid = status->cpl.cdw0;
    4962           1 :         free(status);
    4963             : 
    4964           1 :         assert(nsid > 0);
    4965             : 
    4966             :         /* Return the namespace ID that was created */
    4967           1 :         return nsid;
    4968             : }
    4969             : 
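/* Sketch (illustrative): the usual namespace-management flow pairs these
 * synchronous helpers — create the namespace, then attach it to this
 * controller by CNTLID so it becomes active.  Field values are illustrative.
 */
#include "spdk/stdinc.h"
#include "spdk/nvme.h"

static int
create_and_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint64_t num_blocks)
{
	struct spdk_nvme_ns_data nsdata;
	struct spdk_nvme_ctrlr_list ctrlr_list;
	uint32_t nsid;

	memset(&nsdata, 0, sizeof(nsdata));
	nsdata.nsze = num_blocks;       /* namespace size, in LBAs */
	nsdata.ncap = num_blocks;       /* thick provisioning: capacity == size */
	nsdata.flbas.format = 0;        /* LBA format index 0 */

	nsid = spdk_nvme_ctrlr_create_ns(ctrlr, &nsdata);
	if (nsid == 0) {
		return -EIO;
	}

	memset(&ctrlr_list, 0, sizeof(ctrlr_list));
	ctrlr_list.ctrlr_count = 1;
	ctrlr_list.ctrlr_list[0] = spdk_nvme_ctrlr_get_id(ctrlr);

	return spdk_nvme_ctrlr_attach_ns(ctrlr, nsid, &ctrlr_list);
}
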
    4970             : int
    4971           1 : spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4972             : {
    4973             :         struct nvme_completion_poll_status      *status;
    4974             :         int                                     res;
    4975             : 
    4976           1 :         if (nsid == 0) {
    4977           0 :                 return -EINVAL;
    4978             :         }
    4979             : 
    4980           1 :         status = calloc(1, sizeof(*status));
    4981           1 :         if (!status) {
    4982           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4983           0 :                 return -ENOMEM;
    4984             :         }
    4985             : 
    4986           1 :         res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, status);
    4987           1 :         if (res) {
    4988           0 :                 free(status);
    4989           0 :                 return res;
    4990             :         }
    4991           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4992           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_delete_ns failed!\n");
    4993           0 :                 if (!status->timed_out) {
    4994           0 :                         free(status);
    4995             :                 }
    4996           0 :                 return -ENXIO;
    4997             :         }
    4998           1 :         free(status);
    4999             : 
    5000           1 :         return nvme_ctrlr_identify_active_ns(ctrlr);
    5001             : }
    5002             : 
    5003             : int
    5004           0 : spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    5005             :                        struct spdk_nvme_format *format)
    5006             : {
    5007             :         struct nvme_completion_poll_status      *status;
    5008             :         int                                     res;
    5009             : 
    5010           0 :         status = calloc(1, sizeof(*status));
    5011           0 :         if (!status) {
    5012           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5013           0 :                 return -ENOMEM;
    5014             :         }
    5015             : 
    5016           0 :         res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
    5017             :                                     status);
    5018           0 :         if (res) {
    5019           0 :                 free(status);
    5020           0 :                 return res;
    5021             :         }
    5022           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5023           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_format failed!\n");
    5024           0 :                 if (!status->timed_out) {
    5025           0 :                         free(status);
    5026             :                 }
    5027           0 :                 return -ENXIO;
    5028             :         }
    5029           0 :         free(status);
    5030             : 
    5031           0 :         return spdk_nvme_ctrlr_reset(ctrlr);
    5032             : }
    5033             : 
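/* Sketch (illustrative): spdk_nvme_ctrlr_format() blocks and ends in a full
 * controller reset, so it must not race with in-flight I/O.  A user-data
 * erase of every namespace might look like this (helper name hypothetical).
 */
#include "spdk/stdinc.h"
#include "spdk/nvme.h"

static int
secure_erase_all(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_format fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.lbaf = 0;                                   /* keep LBA format 0 */
	fmt.ses = SPDK_NVME_FMT_NVM_SES_USER_DATA_ERASE;

	/* SPDK_NVME_GLOBAL_NS_TAG applies the format to all namespaces */
	return spdk_nvme_ctrlr_format(ctrlr, SPDK_NVME_GLOBAL_NS_TAG, &fmt);
}
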
    5034             : int
    5035           8 : spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
    5036             :                                 int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status)
    5037             : {
    5038           8 :         struct spdk_nvme_fw_commit              fw_commit;
    5039             :         struct nvme_completion_poll_status      *status;
    5040             :         int                                     res;
    5041             :         unsigned int                            size_remaining;
    5042             :         unsigned int                            offset;
    5043             :         unsigned int                            transfer;
    5044             :         uint8_t                                 *p;
    5045             : 
    5046           8 :         if (!completion_status) {
    5047           0 :                 return -EINVAL;
    5048             :         }
    5049           8 :         memset(completion_status, 0, sizeof(struct spdk_nvme_status));
    5050           8 :         if (size % 4) {
    5051           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_update_firmware invalid size!\n");
    5052           1 :                 return -1;
    5053             :         }
    5054             : 
    5055             :         /* Currently, only SPDK_NVME_FW_COMMIT_REPLACE_IMG and
    5056             :          * SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG are supported.
    5057             :          */
    5058           7 :         if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) &&
    5059             :             (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) {
    5060           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_update_firmware invalid command!\n");
    5061           0 :                 return -1;
    5062             :         }
    5063             : 
    5064           7 :         status = calloc(1, sizeof(*status));
    5065           7 :         if (!status) {
    5066           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5067           0 :                 return -ENOMEM;
    5068             :         }
    5069             : 
    5070             :         /* Firmware download */
    5071           7 :         size_remaining = size;
    5072           7 :         offset = 0;
    5073           7 :         p = payload;
    5074             : 
    5075          10 :         while (size_remaining > 0) {
    5076           7 :                 transfer = spdk_min(size_remaining, ctrlr->min_page_size);
    5077             : 
    5078           7 :                 memset(status, 0, sizeof(*status));
    5079           7 :                 res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
    5080             :                                                        nvme_completion_poll_cb,
    5081             :                                                        status);
    5082           7 :                 if (res) {
    5083           2 :                         free(status);
    5084           2 :                         return res;
    5085             :                 }
    5086             : 
    5087           5 :                 if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5088           2 :                         NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_fw_image_download failed!\n");
    5089           2 :                         if (!status->timed_out) {
    5090           1 :                                 free(status);
    5091             :                         }
    5092           2 :                         return -ENXIO;
    5093             :                 }
    5094           3 :                 p += transfer;
    5095           3 :                 offset += transfer;
    5096           3 :                 size_remaining -= transfer;
    5097             :         }
    5098             : 
    5099             :         /* Firmware commit */
    5100           3 :         memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    5101           3 :         fw_commit.fs = slot;
    5102           3 :         fw_commit.ca = commit_action;
    5103             : 
    5104           3 :         memset(status, 0, sizeof(*status));
    5105           3 :         res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
    5106             :                                        status);
    5107           3 :         if (res) {
    5108           1 :                 free(status);
    5109           1 :                 return res;
    5110             :         }
    5111             : 
    5112           2 :         res = nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock);
    5113             : 
    5114           2 :         memcpy(completion_status, &status->cpl.status, sizeof(struct spdk_nvme_status));
    5115             : 
    5116           2 :         if (!status->timed_out) {
    5117           2 :                 free(status);
    5118             :         }
    5119             : 
    5120           2 :         if (res) {
    5121           1 :                 if (completion_status->sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
    5122           0 :                     completion_status->sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
    5123           1 :                         if (completion_status->sct == SPDK_NVME_SCT_COMMAND_SPECIFIC  &&
    5124           0 :                             completion_status->sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
    5125           0 :                                 NVME_CTRLR_NOTICELOG(ctrlr,
    5126             :                                                      "firmware activation requires a conventional reset to be performed!\n");
    5127             :                         } else {
    5128           1 :                                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    5129             :                         }
    5130           1 :                         return -ENXIO;
    5131             :                 }
    5132             :         }
    5133             : 
    5134           1 :         return spdk_nvme_ctrlr_reset(ctrlr);
    5135             : }
    5136             : 
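/* Sketch (illustrative): the image is downloaded in min_page_size chunks from
 * the caller's buffer, so that buffer must be DMA-safe memory and the size a
 * multiple of 4 bytes.  A minimal synchronous update, assuming slot 1 exists:
 */
#include "spdk/stdinc.h"
#include "spdk/env.h"
#include "spdk/nvme.h"

static int
flash_firmware(struct spdk_nvme_ctrlr *ctrlr, const void *image, uint32_t size)
{
	struct spdk_nvme_status status;
	void *buf;
	int rc;

	buf = spdk_dma_zmalloc(size, 0x1000, NULL);     /* DMA-safe staging copy */
	if (buf == NULL) {
		return -ENOMEM;
	}
	memcpy(buf, image, size);

	rc = spdk_nvme_ctrlr_update_firmware(ctrlr, buf, size, 1 /* slot */,
					     SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG,
					     &status);
	spdk_dma_free(buf);
	return rc;
}
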
    5137             : int
    5138           0 : spdk_nvme_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
    5139             : {
    5140             :         int rc, size;
    5141             :         union spdk_nvme_cmbsz_register cmbsz;
    5142             : 
    5143           0 :         cmbsz = spdk_nvme_ctrlr_get_regs_cmbsz(ctrlr);
    5144             : 
    5145           0 :         if (cmbsz.bits.rds == 0 || cmbsz.bits.wds == 0) {
    5146           0 :                 return -ENOTSUP;
    5147             :         }
    5148             : 
    5149           0 :         size = cmbsz.bits.sz * (0x1000 << (cmbsz.bits.szu * 4));
    5150             : 
    5151           0 :         nvme_ctrlr_lock(ctrlr);
    5152           0 :         rc = nvme_transport_ctrlr_reserve_cmb(ctrlr);
    5153           0 :         nvme_ctrlr_unlock(ctrlr);
    5154             : 
    5155           0 :         if (rc < 0) {
    5156           0 :                 return rc;
    5157             :         }
    5158             : 
    5159           0 :         return size;
    5160             : }
    5161             : 
    5162             : void *
    5163           0 : spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
    5164             : {
    5165             :         void *buf;
    5166             : 
    5167           0 :         nvme_ctrlr_lock(ctrlr);
    5168           0 :         buf = nvme_transport_ctrlr_map_cmb(ctrlr, size);
    5169           0 :         nvme_ctrlr_unlock(ctrlr);
    5170             : 
    5171           0 :         return buf;
    5172             : }
    5173             : 
    5174             : void
    5175           0 : spdk_nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
    5176             : {
    5177           0 :         nvme_ctrlr_lock(ctrlr);
    5178           0 :         nvme_transport_ctrlr_unmap_cmb(ctrlr);
    5179           0 :         nvme_ctrlr_unlock(ctrlr);
    5180           0 : }
    5181             : 
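/* Sketch (illustrative): CMB use is a reserve/map/unmap bracket, and reserve
 * returns the CMB size in bytes on success.  Helper name is hypothetical.
 */
#include "spdk/nvme.h"

static void *
try_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	if (spdk_nvme_ctrlr_reserve_cmb(ctrlr) < 0) {
		return NULL;    /* no CMB, or reservation failed */
	}

	/* pair with spdk_nvme_ctrlr_unmap_cmb(ctrlr) when finished */
	return spdk_nvme_ctrlr_map_cmb(ctrlr, size);
}
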
    5182             : int
    5183           0 : spdk_nvme_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
    5184             : {
    5185             :         int rc;
    5186             : 
    5187           0 :         nvme_ctrlr_lock(ctrlr);
    5188           0 :         rc = nvme_transport_ctrlr_enable_pmr(ctrlr);
    5189           0 :         nvme_ctrlr_unlock(ctrlr);
    5190             : 
    5191           0 :         return rc;
    5192             : }
    5193             : 
    5194             : int
    5195           0 : spdk_nvme_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
    5196             : {
    5197             :         int rc;
    5198             : 
    5199           0 :         nvme_ctrlr_lock(ctrlr);
    5200           0 :         rc = nvme_transport_ctrlr_disable_pmr(ctrlr);
    5201           0 :         nvme_ctrlr_unlock(ctrlr);
    5202             : 
    5203           0 :         return rc;
    5204             : }
    5205             : 
    5206             : void *
    5207           0 : spdk_nvme_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
    5208             : {
    5209             :         void *buf;
    5210             : 
    5211           0 :         nvme_ctrlr_lock(ctrlr);
    5212           0 :         buf = nvme_transport_ctrlr_map_pmr(ctrlr, size);
    5213           0 :         nvme_ctrlr_unlock(ctrlr);
    5214             : 
    5215           0 :         return buf;
    5216             : }
    5217             : 
    5218             : int
    5219           0 : spdk_nvme_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
    5220             : {
    5221             :         int rc;
    5222             : 
    5223           0 :         nvme_ctrlr_lock(ctrlr);
    5224           0 :         rc = nvme_transport_ctrlr_unmap_pmr(ctrlr);
    5225           0 :         nvme_ctrlr_unlock(ctrlr);
    5226             : 
    5227           0 :         return rc;
    5228             : }
    5229             : 
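/* Sketch (illustrative): the PMR follows the same bracket — enable, map, use,
 * then unmap and disable on teardown.  Helper name is hypothetical.
 */
#include "spdk/nvme.h"

static void *
try_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	if (spdk_nvme_ctrlr_enable_pmr(ctrlr) != 0) {
		return NULL;
	}

	/* teardown: spdk_nvme_ctrlr_unmap_pmr(), then spdk_nvme_ctrlr_disable_pmr() */
	return spdk_nvme_ctrlr_map_pmr(ctrlr, size);
}
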
    5230             : int
    5231           0 : spdk_nvme_ctrlr_read_boot_partition_start(struct spdk_nvme_ctrlr *ctrlr, void *payload,
    5232             :                 uint32_t bprsz, uint32_t bprof, uint32_t bpid)
    5233             : {
    5234           0 :         union spdk_nvme_bprsel_register bprsel;
    5235           0 :         union spdk_nvme_bpinfo_register bpinfo;
    5236           0 :         uint64_t bpmbl, bpmb_size;
    5237             : 
    5238           0 :         if (ctrlr->cap.bits.bps == 0) {
    5239           0 :                 return -ENOTSUP;
    5240             :         }
    5241             : 
    5242           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    5243           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "get bpinfo failed\n");
    5244           0 :                 return -EIO;
    5245             :         }
    5246             : 
    5247           0 :         if (bpinfo.bits.brs == SPDK_NVME_BRS_READ_IN_PROGRESS) {
    5248           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition read already initiated\n");
    5249           0 :                 return -EALREADY;
    5250             :         }
    5251             : 
    5252           0 :         nvme_ctrlr_lock(ctrlr);
    5253             : 
    5254           0 :         bpmb_size = (uint64_t)bprsz * 4096;     /* widen before multiplying to avoid 32-bit overflow */
    5255           0 :         bpmbl = spdk_vtophys(payload, &bpmb_size);
    5256           0 :         if (bpmbl == SPDK_VTOPHYS_ERROR) {
    5257           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_vtophys of bpmbl failed\n");
    5258           0 :                 nvme_ctrlr_unlock(ctrlr);
    5259           0 :                 return -EFAULT;
    5260             :         }
    5261             : 
    5262           0 :         if (bpmb_size != (uint64_t)bprsz * 4096) {
    5263           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition buffer is not physically contiguous\n");
    5264           0 :                 nvme_ctrlr_unlock(ctrlr);
    5265           0 :                 return -EFAULT;
    5266             :         }
    5267             : 
    5268           0 :         if (nvme_ctrlr_set_bpmbl(ctrlr, bpmbl)) {
    5269           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_bpmbl() failed\n");
    5270           0 :                 nvme_ctrlr_unlock(ctrlr);
    5271           0 :                 return -EIO;
    5272             :         }
    5273             : 
    5274           0 :         bprsel.raw = 0; bprsel.bits.bpid = bpid;        /* zero the reserved bit before writing the register */
    5275           0 :         bprsel.bits.bprof = bprof;
    5276           0 :         bprsel.bits.bprsz = bprsz;
    5277             : 
    5278           0 :         if (nvme_ctrlr_set_bprsel(ctrlr, &bprsel)) {
    5279           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_bprsel() failed\n");
    5280           0 :                 nvme_ctrlr_unlock(ctrlr);
    5281           0 :                 return -EIO;
    5282             :         }
    5283             : 
    5284           0 :         nvme_ctrlr_unlock(ctrlr);
    5285           0 :         return 0;
    5286             : }
    5287             : 
    5288             : int
    5289           0 : spdk_nvme_ctrlr_read_boot_partition_poll(struct spdk_nvme_ctrlr *ctrlr)
    5290             : {
    5291           0 :         int rc = 0;
    5292           0 :         union spdk_nvme_bpinfo_register bpinfo;
    5293             : 
    5294           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    5295           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "get bpinfo failed\n");
    5296           0 :                 return -EIO;
    5297             :         }
    5298             : 
    5299           0 :         switch (bpinfo.bits.brs) {
    5300           0 :         case SPDK_NVME_BRS_NO_READ:
    5301           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition read not initiated\n");
    5302           0 :                 rc = -EINVAL;
    5303           0 :                 break;
    5304           0 :         case SPDK_NVME_BRS_READ_IN_PROGRESS:
    5305           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition read in progress\n");
    5306           0 :                 rc = -EAGAIN;
    5307           0 :                 break;
    5308           0 :         case SPDK_NVME_BRS_READ_ERROR:
    5309           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Error completing Boot Partition read\n");
    5310           0 :                 rc = -EIO;
    5311           0 :                 break;
    5312           0 :         case SPDK_NVME_BRS_READ_SUCCESS:
    5313           0 :                 NVME_CTRLR_INFOLOG(ctrlr, "Boot Partition read completed successfully\n");
    5314           0 :                 break;
    5315           0 :         default:
    5316           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Invalid Boot Partition read status\n");
    5317           0 :                 rc = -EINVAL;
    5318             :         }
    5319             : 
    5320           0 :         return rc;
    5321             : }
    5322             : 
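/* Sketch (illustrative): a read is started once, then polled until BPINFO's
 * read status leaves "in progress"; -EAGAIN is the only retryable return.
 * The buffer must be physically contiguous DMA memory of bprsz * 4 KiB bytes.
 */
#include "spdk/stdinc.h"
#include "spdk/nvme.h"

static int
read_boot_partition(struct spdk_nvme_ctrlr *ctrlr, void *buf,
		    uint32_t bprsz /* 4 KiB units */, uint32_t bpid)
{
	int rc;

	rc = spdk_nvme_ctrlr_read_boot_partition_start(ctrlr, buf, bprsz,
						       0 /* bprof */, bpid);
	if (rc != 0) {
		return rc;
	}

	do {
		rc = spdk_nvme_ctrlr_read_boot_partition_poll(ctrlr);
	} while (rc == -EAGAIN);        /* optionally sleep between polls */

	return rc;
}
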
    5323             : static void
    5324           0 : nvme_write_boot_partition_cb(void *arg, const struct spdk_nvme_cpl *cpl)
    5325             : {
    5326             :         int res;
    5327           0 :         struct spdk_nvme_ctrlr *ctrlr = arg;
    5328           0 :         struct spdk_nvme_fw_commit fw_commit;
    5329           0 :         struct spdk_nvme_cpl err_cpl =
    5330             :         {.status = {.sct = SPDK_NVME_SCT_GENERIC, .sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR }};
    5331             : 
    5332           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    5333           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Write Boot Partition failed\n");
    5334           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, cpl);
    5335           0 :                 return;
    5336             :         }
    5337             : 
    5338           0 :         if (ctrlr->bp_ws == SPDK_NVME_BP_WS_DOWNLOADING) {
    5339           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Downloading at Offset %d Success\n", ctrlr->fw_offset);
    5340           0 :                 ctrlr->fw_payload = (uint8_t *)ctrlr->fw_payload + ctrlr->fw_transfer_size;
    5341           0 :                 ctrlr->fw_offset += ctrlr->fw_transfer_size;
    5342           0 :                 ctrlr->fw_size_remaining -= ctrlr->fw_transfer_size;
    5343           0 :                 ctrlr->fw_transfer_size = spdk_min(ctrlr->fw_size_remaining, ctrlr->min_page_size);
    5344           0 :                 res = nvme_ctrlr_cmd_fw_image_download(ctrlr, ctrlr->fw_transfer_size, ctrlr->fw_offset,
    5345             :                                                        ctrlr->fw_payload, nvme_write_boot_partition_cb, ctrlr);
    5346           0 :                 if (res) {
    5347           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_image_download failed!\n");
    5348           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5349           0 :                         return;
    5350             :                 }
    5351             : 
    5352           0 :                 if (ctrlr->fw_transfer_size < ctrlr->min_page_size) {
    5353           0 :                         ctrlr->bp_ws = SPDK_NVME_BP_WS_DOWNLOADED;
    5354             :                 }
    5355           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_DOWNLOADED) {
    5356           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Download Success\n");
    5357           0 :                 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    5358           0 :                 fw_commit.bpid = ctrlr->bpid;
    5359           0 :                 fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_BOOT_PARTITION;
    5360           0 :                 res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit,
    5361             :                                                nvme_write_boot_partition_cb, ctrlr);
    5362           0 :                 if (res) {
    5363           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    5364           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "commit action: %d\n", fw_commit.ca);
    5365           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5366           0 :                         return;
    5367             :                 }
    5368             : 
    5369           0 :                 ctrlr->bp_ws = SPDK_NVME_BP_WS_REPLACE;
    5370           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_REPLACE) {
    5371           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Replacement Success\n");
    5372           0 :                 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    5373           0 :                 fw_commit.bpid = ctrlr->bpid;
    5374           0 :                 fw_commit.ca = SPDK_NVME_FW_COMMIT_ACTIVATE_BOOT_PARTITION;
    5375           0 :                 res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit,
    5376             :                                                nvme_write_boot_partition_cb, ctrlr);
    5377           0 :                 if (res) {
    5378           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    5379           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "commit action: %d\n", fw_commit.ca);
    5380           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5381           0 :                         return;
    5382             :                 }
    5383             : 
    5384           0 :                 ctrlr->bp_ws = SPDK_NVME_BP_WS_ACTIVATE;
    5385           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_ACTIVATE) {
    5386           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Activation Success\n");
    5387           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, cpl);
    5388             :         } else {
    5389           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Invalid Boot Partition write state\n");
    5390           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5391           0 :                 return;
    5392             :         }
    5393             : }
    5394             : 
    5395             : int
    5396           0 : spdk_nvme_ctrlr_write_boot_partition(struct spdk_nvme_ctrlr *ctrlr,
    5397             :                                      void *payload, uint32_t size, uint32_t bpid,
    5398             :                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    5399             : {
    5400             :         int res;
    5401             : 
    5402           0 :         if (ctrlr->cap.bits.bps == 0) {
    5403           0 :                 return -ENOTSUP;
    5404             :         }
    5405             : 
    5406           0 :         ctrlr->bp_ws = SPDK_NVME_BP_WS_DOWNLOADING;
    5407           0 :         ctrlr->bpid = bpid;
    5408           0 :         ctrlr->bp_write_cb_fn = cb_fn;
    5409           0 :         ctrlr->bp_write_cb_arg = cb_arg;
    5410           0 :         ctrlr->fw_offset = 0;
    5411           0 :         ctrlr->fw_size_remaining = size;
    5412           0 :         ctrlr->fw_payload = payload;
    5413           0 :         ctrlr->fw_transfer_size = spdk_min(ctrlr->fw_size_remaining, ctrlr->min_page_size);
    5414             : 
    5415           0 :         res = nvme_ctrlr_cmd_fw_image_download(ctrlr, ctrlr->fw_transfer_size, ctrlr->fw_offset,
    5416             :                                                ctrlr->fw_payload, nvme_write_boot_partition_cb, ctrlr);
    5417             : 
    5418           0 :         return res;
    5419             : }
    5420             : 
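/* Sketch (illustrative): the write path is asynchronous — the state machine
 * in nvme_write_boot_partition_cb() above walks DOWNLOADING → DOWNLOADED →
 * REPLACE → ACTIVATE and invokes cb_fn once with the final completion, so the
 * caller only keeps the admin queue polled.  The payload must be DMA-safe.
 */
#include "spdk/stdinc.h"
#include "spdk/nvme.h"

static void
bp_write_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	*(bool *)arg = true;    /* final completion (success or error) */
}

static int
write_boot_partition_sync(struct spdk_nvme_ctrlr *ctrlr, void *img, uint32_t size)
{
	bool done = false;
	int rc;

	rc = spdk_nvme_ctrlr_write_boot_partition(ctrlr, img, size, 0 /* bpid */,
						  bp_write_done, &done);
	if (rc != 0) {
		return rc;
	}

	while (!done) {
		spdk_nvme_ctrlr_process_admin_completions(ctrlr);
	}

	return 0;
}
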
    5421             : bool
    5422          43 : spdk_nvme_ctrlr_is_discovery(struct spdk_nvme_ctrlr *ctrlr)
    5423             : {
    5424          43 :         assert(ctrlr);
    5425             : 
    5426          43 :         return !strncmp(ctrlr->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN,
    5427             :                         strlen(SPDK_NVMF_DISCOVERY_NQN));
    5428             : }
    5429             : 
    5430             : bool
    5431          20 : spdk_nvme_ctrlr_is_fabrics(struct spdk_nvme_ctrlr *ctrlr)
    5432             : {
    5433          20 :         assert(ctrlr);
    5434             : 
    5435          20 :         return spdk_nvme_trtype_is_fabrics(ctrlr->trid.trtype);
    5436             : }
    5437             : 
    5438             : int
    5439           0 : spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
    5440             :                                  uint16_t spsp, uint8_t nssf, void *payload, size_t size)
    5441             : {
    5442             :         struct nvme_completion_poll_status      *status;
    5443             :         int                                     res;
    5444             : 
    5445           0 :         status = calloc(1, sizeof(*status));
    5446           0 :         if (!status) {
    5447           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5448           0 :                 return -ENOMEM;
    5449             :         }
    5450             : 
    5451           0 :         res = spdk_nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
    5452             :                         nvme_completion_poll_cb, status);
    5453           0 :         if (res) {
    5454           0 :                 free(status);
    5455           0 :                 return res;
    5456             :         }
    5457           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5458           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_security_receive failed!\n");
    5459           0 :                 if (!status->timed_out) {
    5460           0 :                         free(status);
    5461             :                 }
    5462           0 :                 return -ENXIO;
    5463             :         }
    5464           0 :         free(status);
    5465             : 
    5466           0 :         return 0;
    5467             : }
    5468             : 
    5469             : int
    5470           0 : spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
    5471             :                               uint16_t spsp, uint8_t nssf, void *payload, size_t size)
    5472             : {
    5473             :         struct nvme_completion_poll_status      *status;
    5474             :         int                                     res;
    5475             : 
    5476           0 :         status = calloc(1, sizeof(*status));
    5477           0 :         if (!status) {
    5478           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5479           0 :                 return -ENOMEM;
    5480             :         }
    5481             : 
    5482           0 :         res = spdk_nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size,
    5483             :                                                 nvme_completion_poll_cb,
    5484             :                                                 status);
    5485           0 :         if (res) {
    5486           0 :                 free(status);
    5487           0 :                 return res;
    5488             :         }
    5489           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5490           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_security_send failed!\n");
    5491           0 :                 if (!status->timed_out) {
    5492           0 :                         free(status);
    5493             :                 }
    5494           0 :                 return -ENXIO;
    5495             :         }
    5496             : 
    5497           0 :         free(status);
    5498             : 
    5499           0 :         return 0;
    5500             : }
    5501             : 
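/* Sketch (illustrative): both helpers are synchronous wrappers around the
 * Security Send/Receive admin commands.  As an illustration only, a TCG
 * Level 0 discovery uses security protocol 0x01 with SP specific field
 * 0x0001 (values come from the TCG specs, not from this file).
 */
#include "spdk/nvme.h"

static int
tcg_level0_discovery(struct spdk_nvme_ctrlr *ctrlr, void *resp, size_t len)
{
	return spdk_nvme_ctrlr_security_receive(ctrlr, 0x01 /* secp: TCG */,
						0x0001 /* spsp */, 0 /* nssf */,
						resp, len);
}
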
    5502             : uint64_t
    5503           1 : spdk_nvme_ctrlr_get_flags(struct spdk_nvme_ctrlr *ctrlr)
    5504             : {
    5505           1 :         return ctrlr->flags;
    5506             : }
    5507             : 
    5508             : const struct spdk_nvme_transport_id *
    5509           0 : spdk_nvme_ctrlr_get_transport_id(struct spdk_nvme_ctrlr *ctrlr)
    5510             : {
    5511           0 :         return &ctrlr->trid;
    5512             : }
    5513             : 
    5514             : int32_t
    5515          17 : spdk_nvme_ctrlr_alloc_qid(struct spdk_nvme_ctrlr *ctrlr)
    5516             : {
    5517             :         uint32_t qid;
    5518             : 
    5519          17 :         assert(ctrlr->free_io_qids);
    5520          17 :         nvme_ctrlr_lock(ctrlr);
    5521          17 :         qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
    5522          17 :         if (qid > ctrlr->opts.num_io_queues) {
    5523           2 :                 NVME_CTRLR_ERRLOG(ctrlr, "No free I/O queue IDs\n");
    5524           2 :                 nvme_ctrlr_unlock(ctrlr);
    5525           2 :                 return -1;
    5526             :         }
    5527             : 
    5528          15 :         spdk_bit_array_clear(ctrlr->free_io_qids, qid);
    5529          15 :         nvme_ctrlr_unlock(ctrlr);
    5530          15 :         return qid;
    5531             : }
    5532             : 
    5533             : void
    5534          64 : spdk_nvme_ctrlr_free_qid(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid)
    5535             : {
    5536          64 :         assert(qid <= ctrlr->opts.num_io_queues);
    5537             : 
    5538          64 :         nvme_ctrlr_lock(ctrlr);
    5539             : 
    5540          64 :         if (spdk_likely(ctrlr->free_io_qids)) {
    5541          64 :                 spdk_bit_array_set(ctrlr->free_io_qids, qid);
    5542             :         }
    5543             : 
    5544          64 :         nvme_ctrlr_unlock(ctrlr);
    5545          64 : }
    5546             : 
    5547             : int
    5548           2 : spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
    5549             :                                    struct spdk_memory_domain **domains, int array_size)
    5550             : {
    5551           2 :         return nvme_transport_ctrlr_get_memory_domains(ctrlr, domains, array_size);
    5552             : }
    5553             : 
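/* Sketch (illustrative, assuming the transport supports the size-query
 * convention): passing NULL/0 returns the number of memory domains without
 * filling anything, the usual two-call pattern for array getters.
 */
#include "spdk/nvme.h"

static int
count_memory_domains(const struct spdk_nvme_ctrlr *ctrlr)
{
	/* returns the domain count; a second call with an array would fill it */
	return spdk_nvme_ctrlr_get_memory_domains(ctrlr, NULL, 0);
}
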
    5554             : int
    5555           0 : spdk_nvme_ctrlr_authenticate(struct spdk_nvme_ctrlr *ctrlr,
    5556             :                              spdk_nvme_authenticate_cb cb_fn, void *cb_ctx)
    5557             : {
    5558           0 :         return spdk_nvme_qpair_authenticate(ctrlr->adminq, cb_fn, cb_ctx);
    5559             : }

Generated by: LCOV version 1.15