LCOV - code coverage report
Current view: top level - lib/nvme - nvme_ctrlr.c (source / functions)
Test: ut_cov_unit.info
Date: 2024-11-16 18:00:23
Coverage:    Lines: 1589 / 2705 (58.7 %)    Functions: 141 / 209 (67.5 %)

          Line data    Source code
       1             : /*   SPDX-License-Identifier: BSD-3-Clause
       2             :  *   Copyright (C) 2015 Intel Corporation. All rights reserved.
       3             :  *   Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
       4             :  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
       5             :  */
       6             : 
       7             : #include "spdk/stdinc.h"
       8             : 
       9             : #include "nvme_internal.h"
      10             : #include "nvme_io_msg.h"
      11             : 
      12             : #include "spdk/env.h"
      13             : #include "spdk/string.h"
      14             : #include "spdk/endian.h"
      15             : 
      16             : struct nvme_active_ns_ctx;
      17             : 
      18             : static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
      19             :                 struct nvme_async_event_request *aer);
      20             : static void nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx);
      21             : static int nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns);
      22             : static int nvme_ctrlr_identify_ns_iocs_specific_async(struct spdk_nvme_ns *ns);
      23             : static int nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns);
      24             : static void nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr);
      25             : static void nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
      26             :                                  uint64_t timeout_in_ms);
      27             : 
      28             : static int
      29      477891 : nvme_ns_cmp(struct spdk_nvme_ns *ns1, struct spdk_nvme_ns *ns2)
      30             : {
      31      477891 :         if (ns1->id < ns2->id) {
      32      164867 :                 return -1;
      33      313024 :         } else if (ns1->id > ns2->id) {
      34      276062 :                 return 1;
      35             :         } else {
      36       36962 :                 return 0;
      37             :         }
      38             : }
      39             : 
      40      599405 : RB_GENERATE_STATIC(nvme_ns_tree, spdk_nvme_ns, node, nvme_ns_cmp);
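/*
 * Illustrative sketch (not part of the original listing): RB_GENERATE_STATIC()
 * from sys/tree.h emits the static red-black tree implementation for a tree of
 * spdk_nvme_ns nodes ordered by namespace ID through nvme_ns_cmp() above, so a
 * lookup by NSID reduces to RB_FIND() with a stack-allocated key. The helper
 * name is hypothetical; the in-tree equivalent is spdk_nvme_ctrlr_get_ns().
 */
static inline struct spdk_nvme_ns *
example_ns_lookup(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	struct spdk_nvme_ns key = { .id = nsid };

	return RB_FIND(nvme_ns_tree, &ctrlr->ns, &key);
}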
      41             : 
      42             : #define CTRLR_STRING(ctrlr) \
      43             :         ((ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_TCP || ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_RDMA) ? \
      44             :         ctrlr->trid.subnqn : ctrlr->trid.traddr)
      45             : 
      46             : #define NVME_CTRLR_ERRLOG(ctrlr, format, ...) \
      47             :         SPDK_ERRLOG("[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__);
      48             : 
      49             : #define NVME_CTRLR_WARNLOG(ctrlr, format, ...) \
      50             :         SPDK_WARNLOG("[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__);
      51             : 
      52             : #define NVME_CTRLR_NOTICELOG(ctrlr, format, ...) \
      53             :         SPDK_NOTICELOG("[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__);
      54             : 
      55             : #define NVME_CTRLR_INFOLOG(ctrlr, format, ...) \
      56             :         SPDK_INFOLOG(nvme, "[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__);
      57             : 
      58             : #ifdef DEBUG
      59             : #define NVME_CTRLR_DEBUGLOG(ctrlr, format, ...) \
      60             :         SPDK_DEBUGLOG(nvme, "[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__);
      61             : #else
      62             : #define NVME_CTRLR_DEBUGLOG(ctrlr, ...) do { } while (0)
      63             : #endif
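/*
 * Illustrative sketch of a hypothetical call site: every macro above prefixes
 * its message with CTRLR_STRING(ctrlr), i.e. the subsystem NQN for TCP/RDMA
 * controllers or the transport address (e.g. a PCIe BDF) otherwise, keeping
 * multi-controller logs attributable.
 */
static inline void
example_log_caller(struct spdk_nvme_ctrlr *ctrlr, int rc)
{
	/* Expands to SPDK_ERRLOG("[%s] reset failed: %d\n", CTRLR_STRING(ctrlr), rc); */
	NVME_CTRLR_ERRLOG(ctrlr, "reset failed: %d\n", rc);
}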
      64             : 
      65             : #define nvme_ctrlr_get_reg_async(ctrlr, reg, sz, cb_fn, cb_arg) \
      66             :         nvme_transport_ctrlr_get_reg_ ## sz ## _async(ctrlr, \
      67             :                 offsetof(struct spdk_nvme_registers, reg), cb_fn, cb_arg)
      68             : 
      69             : #define nvme_ctrlr_set_reg_async(ctrlr, reg, sz, val, cb_fn, cb_arg) \
      70             :         nvme_transport_ctrlr_set_reg_ ## sz ## _async(ctrlr, \
      71             :                 offsetof(struct spdk_nvme_registers, reg), val, cb_fn, cb_arg)
      72             : 
      73             : #define nvme_ctrlr_get_cc_async(ctrlr, cb_fn, cb_arg) \
      74             :         nvme_ctrlr_get_reg_async(ctrlr, cc, 4, cb_fn, cb_arg)
      75             : 
      76             : #define nvme_ctrlr_get_csts_async(ctrlr, cb_fn, cb_arg) \
      77             :         nvme_ctrlr_get_reg_async(ctrlr, csts, 4, cb_fn, cb_arg)
      78             : 
      79             : #define nvme_ctrlr_get_cap_async(ctrlr, cb_fn, cb_arg) \
      80             :         nvme_ctrlr_get_reg_async(ctrlr, cap, 8, cb_fn, cb_arg)
      81             : 
      82             : #define nvme_ctrlr_get_vs_async(ctrlr, cb_fn, cb_arg) \
      83             :         nvme_ctrlr_get_reg_async(ctrlr, vs, 4, cb_fn, cb_arg)
      84             : 
      85             : #define nvme_ctrlr_set_cc_async(ctrlr, value, cb_fn, cb_arg) \
      86             :         nvme_ctrlr_set_reg_async(ctrlr, cc, 4, value, cb_fn, cb_arg)
      87             : 
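/*
 * Illustrative expansion (comment only): the ## token pasting above turns
 *
 *     nvme_ctrlr_get_cc_async(ctrlr, cb_fn, cb_arg)
 *
 * into
 *
 *     nvme_transport_ctrlr_get_reg_4_async(ctrlr,
 *             offsetof(struct spdk_nvme_registers, cc), cb_fn, cb_arg)
 *
 * so a single pair of macros covers every 4- and 8-byte register without
 * hand-written per-register wrappers.
 */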
      88             : static int
      89           0 : nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
      90             : {
      91           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
      92             :                                               &cc->raw);
      93             : }
      94             : 
      95             : static int
      96           0 : nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
      97             : {
      98           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
      99             :                                               &csts->raw);
     100             : }
     101             : 
     102             : int
     103           0 : nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
     104             : {
     105           0 :         return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
     106             :                                               &cap->raw);
     107             : }
     108             : 
     109             : int
     110           1 : nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
     111             : {
     112           1 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
     113             :                                               &vs->raw);
     114             : }
     115             : 
     116             : int
     117           0 : nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz)
     118             : {
     119           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
     120             :                                               &cmbsz->raw);
     121             : }
     122             : 
     123             : int
     124           0 : nvme_ctrlr_get_pmrcap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_pmrcap_register *pmrcap)
     125             : {
     126           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
     127             :                                               &pmrcap->raw);
     128             : }
     129             : 
     130             : int
     131           0 : nvme_ctrlr_get_bpinfo(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bpinfo_register *bpinfo)
     132             : {
     133           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, bpinfo.raw),
     134             :                                               &bpinfo->raw);
     135             : }
     136             : 
     137             : int
     138           0 : nvme_ctrlr_set_bprsel(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bprsel_register *bprsel)
     139             : {
     140           0 :         return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, bprsel.raw),
     141             :                                               bprsel->raw);
     142             : }
     143             : 
     144             : int
     145           0 : nvme_ctrlr_set_bpmbl(struct spdk_nvme_ctrlr *ctrlr, uint64_t bpmbl_value)
     146             : {
     147           0 :         return nvme_transport_ctrlr_set_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, bpmbl),
     148             :                                               bpmbl_value);
     149             : }
     150             : 
     151             : static int
     152           0 : nvme_ctrlr_set_nssr(struct spdk_nvme_ctrlr *ctrlr, uint32_t nssr_value)
     153             : {
     154           0 :         return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, nssr),
     155             :                                               nssr_value);
     156             : }
     157             : 
     158             : bool
     159          33 : nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr)
     160             : {
     161          35 :         return ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS &&
     162           2 :                ctrlr->opts.command_set == SPDK_NVME_CC_CSS_IOCS;
     163             : }
     164             : 
      165             : /* When a field in spdk_nvme_ctrlr_opts is changed and you change this function, please
     166             :  * also update the nvme_ctrl_opts_init function in nvme_ctrlr.c
     167             :  */
     168             : void
     169           2 : spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
     170             : {
     171           2 :         assert(opts);
     172             : 
     173           2 :         opts->opts_size = opts_size;
     174             : 
     175             : #define FIELD_OK(field) \
     176             :         offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size
     177             : 
     178             : #define SET_FIELD(field, value) \
     179             :         if (offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size) { \
     180             :                 opts->field = value; \
     181             :         } \
     182             : 
     183           2 :         SET_FIELD(num_io_queues, DEFAULT_MAX_IO_QUEUES);
     184           2 :         SET_FIELD(use_cmb_sqs, false);
     185           2 :         SET_FIELD(no_shn_notification, false);
     186           2 :         SET_FIELD(arb_mechanism, SPDK_NVME_CC_AMS_RR);
     187           2 :         SET_FIELD(arbitration_burst, 0);
     188           2 :         SET_FIELD(low_priority_weight, 0);
     189           2 :         SET_FIELD(medium_priority_weight, 0);
     190           2 :         SET_FIELD(high_priority_weight, 0);
     191           2 :         SET_FIELD(keep_alive_timeout_ms, MIN_KEEP_ALIVE_TIMEOUT_IN_MS);
     192           2 :         SET_FIELD(transport_retry_count, SPDK_NVME_DEFAULT_RETRY_COUNT);
     193           2 :         SET_FIELD(io_queue_size, DEFAULT_IO_QUEUE_SIZE);
     194             : 
     195           2 :         if (nvme_driver_init() == 0) {
     196           2 :                 if (FIELD_OK(hostnqn)) {
     197           1 :                         nvme_get_default_hostnqn(opts->hostnqn, sizeof(opts->hostnqn));
     198             :                 }
     199             : 
     200           2 :                 if (FIELD_OK(extended_host_id)) {
     201           1 :                         memcpy(opts->extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
     202             :                                sizeof(opts->extended_host_id));
     203             :                 }
     204             : 
     205             :         }
     206             : 
     207           2 :         SET_FIELD(io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
     208             : 
     209           2 :         if (FIELD_OK(src_addr)) {
     210           1 :                 memset(opts->src_addr, 0, sizeof(opts->src_addr));
     211             :         }
     212             : 
     213           2 :         if (FIELD_OK(src_svcid)) {
     214           1 :                 memset(opts->src_svcid, 0, sizeof(opts->src_svcid));
     215             :         }
     216             : 
     217           2 :         if (FIELD_OK(host_id)) {
     218           1 :                 memset(opts->host_id, 0, sizeof(opts->host_id));
     219             :         }
     220             : 
     221           2 :         SET_FIELD(command_set, CHAR_BIT);
     222           2 :         SET_FIELD(admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
     223           2 :         SET_FIELD(header_digest, false);
     224           2 :         SET_FIELD(data_digest, false);
     225           2 :         SET_FIELD(disable_error_logging, false);
     226           2 :         SET_FIELD(transport_ack_timeout, SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT);
     227           2 :         SET_FIELD(admin_queue_size, DEFAULT_ADMIN_QUEUE_SIZE);
     228           2 :         SET_FIELD(fabrics_connect_timeout_us, NVME_FABRIC_CONNECT_COMMAND_TIMEOUT);
     229           2 :         SET_FIELD(disable_read_ana_log_page, false);
     230           2 :         SET_FIELD(disable_read_changed_ns_list_log_page, false);
     231           2 :         SET_FIELD(tls_psk, NULL);
     232           2 :         SET_FIELD(dhchap_key, NULL);
     233           2 :         SET_FIELD(dhchap_ctrlr_key, NULL);
     234           2 :         SET_FIELD(dhchap_digests,
     235             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA256) |
     236             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA384) |
     237             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA512));
     238           2 :         SET_FIELD(dhchap_dhgroups,
     239             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_NULL) |
     240             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_2048) |
     241             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_3072) |
     242             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_4096) |
     243             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_6144) |
     244             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_8192));
     245             : #undef FIELD_OK
     246             : #undef SET_FIELD
     247           2 : }
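/*
 * Illustrative sketch of a hypothetical caller: because every store above is
 * gated on opts_size, a binary built against an older, smaller
 * spdk_nvme_ctrlr_opts still receives correct defaults -- fields beyond its
 * opts_size are simply skipped.
 */
static inline void
example_default_opts_caller(void)
{
	struct spdk_nvme_ctrlr_opts opts;

	/* Passing the caller's sizeof(opts), not the library's, is what makes
	 * the scheme ABI-safe across SPDK versions. */
	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
	opts.keep_alive_timeout_ms = 10 * 1000;
}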
     248             : 
     249             : const struct spdk_nvme_ctrlr_opts *
     250           0 : spdk_nvme_ctrlr_get_opts(struct spdk_nvme_ctrlr *ctrlr)
     251             : {
     252           0 :         return &ctrlr->opts;
     253             : }
     254             : 
     255             : /**
     256             :  * This function will be called when the process allocates the IO qpair.
     257             :  * Note: the ctrlr_lock must be held when calling this function.
     258             :  */
     259             : static void
     260          15 : nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
     261             : {
     262             :         struct spdk_nvme_ctrlr_process  *active_proc;
     263          15 :         struct spdk_nvme_ctrlr          *ctrlr = qpair->ctrlr;
     264             : 
     265          15 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
     266          15 :         if (active_proc) {
     267           0 :                 TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, per_process_tailq);
     268           0 :                 qpair->active_proc = active_proc;
     269             :         }
     270          15 : }
     271             : 
     272             : /**
     273             :  * This function will be called when the process frees the IO qpair.
     274             :  * Note: the ctrlr_lock must be held when calling this function.
     275             :  */
     276             : static void
     277          15 : nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
     278             : {
     279             :         struct spdk_nvme_ctrlr_process  *active_proc;
     280          15 :         struct spdk_nvme_ctrlr          *ctrlr = qpair->ctrlr;
     281             :         struct spdk_nvme_qpair          *active_qpair, *tmp_qpair;
     282             : 
     283          15 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
     284          15 :         if (!active_proc) {
     285          15 :                 return;
     286             :         }
     287             : 
     288           0 :         TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs,
     289             :                            per_process_tailq, tmp_qpair) {
     290           0 :                 if (active_qpair == qpair) {
     291           0 :                         TAILQ_REMOVE(&active_proc->allocated_io_qpairs,
     292             :                                      active_qpair, per_process_tailq);
     293             : 
     294           0 :                         break;
     295             :                 }
     296             :         }
     297             : }
     298             : 
     299             : void
     300          27 : spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
     301             :                 struct spdk_nvme_io_qpair_opts *opts,
     302             :                 size_t opts_size)
     303             : {
     304          27 :         assert(ctrlr);
     305             : 
     306          27 :         assert(opts);
     307             : 
     308          27 :         memset(opts, 0, opts_size);
     309             : 
     310             : #define FIELD_OK(field) \
     311             :         offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size
     312             : 
     313          27 :         if (FIELD_OK(qprio)) {
     314          27 :                 opts->qprio = SPDK_NVME_QPRIO_URGENT;
     315             :         }
     316             : 
     317          27 :         if (FIELD_OK(io_queue_size)) {
     318          27 :                 opts->io_queue_size = ctrlr->opts.io_queue_size;
     319             :         }
     320             : 
     321          27 :         if (FIELD_OK(io_queue_requests)) {
     322          26 :                 opts->io_queue_requests = ctrlr->opts.io_queue_requests;
     323             :         }
     324             : 
     325          27 :         if (FIELD_OK(delay_cmd_submit)) {
     326          26 :                 opts->delay_cmd_submit = false;
     327             :         }
     328             : 
     329          27 :         if (FIELD_OK(sq.vaddr)) {
     330          26 :                 opts->sq.vaddr = NULL;
     331             :         }
     332             : 
     333          27 :         if (FIELD_OK(sq.paddr)) {
     334          26 :                 opts->sq.paddr = 0;
     335             :         }
     336             : 
     337          27 :         if (FIELD_OK(sq.buffer_size)) {
     338          26 :                 opts->sq.buffer_size = 0;
     339             :         }
     340             : 
     341          27 :         if (FIELD_OK(cq.vaddr)) {
     342          26 :                 opts->cq.vaddr = NULL;
     343             :         }
     344             : 
     345          27 :         if (FIELD_OK(cq.paddr)) {
     346          26 :                 opts->cq.paddr = 0;
     347             :         }
     348             : 
     349          27 :         if (FIELD_OK(cq.buffer_size)) {
     350          26 :                 opts->cq.buffer_size = 0;
     351             :         }
     352             : 
     353          27 :         if (FIELD_OK(create_only)) {
     354          26 :                 opts->create_only = false;
     355             :         }
     356             : 
     357          27 :         if (FIELD_OK(async_mode)) {
     358          26 :                 opts->async_mode = false;
     359             :         }
     360             : 
     361             : #undef FIELD_OK
     362          27 : }
     363             : 
     364             : static struct spdk_nvme_qpair *
     365          22 : nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
     366             :                            const struct spdk_nvme_io_qpair_opts *opts)
     367             : {
     368             :         int32_t                                 qid;
     369             :         struct spdk_nvme_qpair                  *qpair;
     370             :         union spdk_nvme_cc_register             cc;
     371             : 
     372          22 :         if (!ctrlr) {
     373           0 :                 return NULL;
     374             :         }
     375             : 
     376          22 :         nvme_ctrlr_lock(ctrlr);
     377          22 :         cc.raw = ctrlr->process_init_cc.raw;
     378             : 
     379          22 :         if (opts->qprio & ~SPDK_NVME_CREATE_IO_SQ_QPRIO_MASK) {
     380           2 :                 nvme_ctrlr_unlock(ctrlr);
     381           2 :                 return NULL;
     382             :         }
     383             : 
     384             :         /*
      385             :          * Only the value SPDK_NVME_QPRIO_URGENT (0) is valid for the
     386             :          * default round robin arbitration method.
     387             :          */
     388          20 :         if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts->qprio != SPDK_NVME_QPRIO_URGENT)) {
     389           3 :                 NVME_CTRLR_ERRLOG(ctrlr, "invalid queue priority for default round robin arbitration method\n");
     390           3 :                 nvme_ctrlr_unlock(ctrlr);
     391           3 :                 return NULL;
     392             :         }
     393             : 
     394          17 :         qid = spdk_nvme_ctrlr_alloc_qid(ctrlr);
     395          17 :         if (qid < 0) {
     396           2 :                 nvme_ctrlr_unlock(ctrlr);
     397           2 :                 return NULL;
     398             :         }
     399             : 
     400          15 :         qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, opts);
     401          15 :         if (qpair == NULL) {
     402           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_transport_ctrlr_create_io_qpair() failed\n");
     403           0 :                 spdk_nvme_ctrlr_free_qid(ctrlr, qid);
     404           0 :                 nvme_ctrlr_unlock(ctrlr);
     405           0 :                 return NULL;
     406             :         }
     407             : 
     408          15 :         TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
     409             : 
     410          15 :         nvme_ctrlr_proc_add_io_qpair(qpair);
     411             : 
     412          15 :         nvme_ctrlr_unlock(ctrlr);
     413             : 
     414          15 :         return qpair;
     415             : }
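/*
 * Illustrative sketch (assumes the controller was probed with
 * opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR): a non-urgent priority is only
 * accepted when weighted round robin was negotiated at init time; under the
 * default round robin arbitration, anything other than SPDK_NVME_QPRIO_URGENT
 * is rejected above.
 */
static inline struct spdk_nvme_qpair *
example_low_prio_qpair(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_io_qpair_opts opts;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	opts.qprio = SPDK_NVME_QPRIO_LOW;
	return spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
}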
     416             : 
     417             : int
     418          15 : spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
     419             : {
     420             :         int rc;
     421             : 
     422          15 :         if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
     423           0 :                 return -EISCONN;
     424             :         }
     425             : 
     426          15 :         nvme_ctrlr_lock(ctrlr);
     427          15 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
     428          15 :         nvme_ctrlr_unlock(ctrlr);
     429             : 
     430          15 :         if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
     431           0 :                 spdk_delay_us(100);
     432             :         }
     433             : 
     434          15 :         return rc;
     435             : }
     436             : 
     437             : void
     438           0 : spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
     439             : {
     440           0 :         struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
     441             : 
     442           0 :         nvme_ctrlr_lock(ctrlr);
     443           0 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     444           0 :         nvme_ctrlr_unlock(ctrlr);
     445           0 : }
     446             : 
     447             : struct spdk_nvme_qpair *
     448          23 : spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
     449             :                                const struct spdk_nvme_io_qpair_opts *user_opts,
     450             :                                size_t opts_size)
     451             : {
     452             : 
     453          23 :         struct spdk_nvme_qpair          *qpair = NULL;
     454          23 :         struct spdk_nvme_io_qpair_opts  opts;
     455             :         int                             rc;
     456             : 
     457          23 :         nvme_ctrlr_lock(ctrlr);
     458             : 
     459          23 :         if (spdk_unlikely(ctrlr->state != NVME_CTRLR_STATE_READY)) {
      460             :                 /* When the controller is resetting or initializing, free_io_qids has been deleted or not yet created.
      461             :                  * We can't create an I/O qpair in that case. */
     462           1 :                 goto unlock;
     463             :         }
     464             : 
     465             :         /*
     466             :          * Get the default options, then overwrite them with the user-provided options
     467             :          * up to opts_size.
     468             :          *
     469             :          * This allows for extensions of the opts structure without breaking
     470             :          * ABI compatibility.
     471             :          */
     472          22 :         spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
     473          22 :         if (user_opts) {
     474          18 :                 memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
     475             : 
     476             :                 /* If user passes buffers, make sure they're big enough for the requested queue size */
     477          18 :                 if (opts.sq.vaddr) {
     478           0 :                         if (opts.sq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cmd))) {
     479           0 :                                 NVME_CTRLR_ERRLOG(ctrlr, "sq buffer size %" PRIx64 " is too small for sq size %zx\n",
     480             :                                                   opts.sq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cmd)));
     481           0 :                                 goto unlock;
     482             :                         }
     483             :                 }
     484          18 :                 if (opts.cq.vaddr) {
     485           0 :                         if (opts.cq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cpl))) {
     486           0 :                                 NVME_CTRLR_ERRLOG(ctrlr, "cq buffer size %" PRIx64 " is too small for cq size %zx\n",
     487             :                                                   opts.cq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cpl)));
     488           0 :                                 goto unlock;
     489             :                         }
     490             :                 }
     491             :         }
     492             : 
     493          22 :         qpair = nvme_ctrlr_create_io_qpair(ctrlr, &opts);
     494             : 
     495          22 :         if (qpair == NULL || opts.create_only == true) {
     496           7 :                 goto unlock;
     497             :         }
     498             : 
     499          15 :         rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
     500          15 :         if (rc != 0) {
     501           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_transport_ctrlr_connect_io_qpair() failed\n");
     502           1 :                 nvme_ctrlr_proc_remove_io_qpair(qpair);
     503           1 :                 TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
     504           1 :                 spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
     505           1 :                 nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
     506           1 :                 qpair = NULL;
     507           1 :                 goto unlock;
     508             :         }
     509             : 
     510          23 : unlock:
     511          23 :         nvme_ctrlr_unlock(ctrlr);
     512             : 
     513          23 :         return qpair;
     514             : }
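/*
 * Illustrative sketch of a hypothetical caller: with create_only = true the
 * qpair comes back disconnected, so the application can connect it later
 * (e.g. on the thread that will own it) with spdk_nvme_ctrlr_connect_io_qpair().
 */
static inline struct spdk_nvme_qpair *
example_deferred_connect(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	opts.create_only = true;

	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
	if (qpair != NULL && spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair) != 0) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		qpair = NULL;
	}
	return qpair;
}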
     515             : 
     516             : int
     517           8 : spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
     518             : {
     519             :         struct spdk_nvme_ctrlr *ctrlr;
     520             :         enum nvme_qpair_state qpair_state;
     521             :         int rc;
     522             : 
     523           8 :         assert(qpair != NULL);
     524           8 :         assert(nvme_qpair_is_admin_queue(qpair) == false);
     525           8 :         assert(qpair->ctrlr != NULL);
     526             : 
     527           8 :         ctrlr = qpair->ctrlr;
     528           8 :         nvme_ctrlr_lock(ctrlr);
     529           8 :         qpair_state = nvme_qpair_get_state(qpair);
     530             : 
     531           8 :         if (ctrlr->is_removed) {
     532           2 :                 rc = -ENODEV;
     533           2 :                 goto out;
     534             :         }
     535             : 
     536           6 :         if (ctrlr->is_resetting || qpair_state == NVME_QPAIR_DISCONNECTING) {
     537           2 :                 rc = -EAGAIN;
     538           2 :                 goto out;
     539             :         }
     540             : 
     541           4 :         if (ctrlr->is_failed || qpair_state == NVME_QPAIR_DESTROYING) {
     542           2 :                 rc = -ENXIO;
     543           2 :                 goto out;
     544             :         }
     545             : 
     546           2 :         if (qpair_state != NVME_QPAIR_DISCONNECTED) {
     547           1 :                 rc = 0;
     548           1 :                 goto out;
     549             :         }
     550             : 
     551           1 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
     552           1 :         if (rc) {
     553           0 :                 rc = -EAGAIN;
     554           0 :                 goto out;
     555             :         }
     556             : 
     557           1 : out:
     558           8 :         nvme_ctrlr_unlock(ctrlr);
     559           8 :         return rc;
     560             : }
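/*
 * Illustrative sketch of a hypothetical caller: the return codes above
 * separate "retry later" (-EAGAIN: reset or disconnect in progress) from
 * fatal conditions (-ENODEV: controller removed, -ENXIO: failed or
 * destroying), so a poller can decide whether reconnecting is still useful.
 */
static inline bool
example_should_retry_reconnect(struct spdk_nvme_qpair *qpair)
{
	return spdk_nvme_ctrlr_reconnect_io_qpair(qpair) == -EAGAIN;
}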
     561             : 
     562             : spdk_nvme_qp_failure_reason
     563           0 : spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
     564             : {
     565           0 :         return ctrlr->adminq->transport_failure_reason;
     566             : }
     567             : 
     568             : /*
     569             :  * This internal function will attempt to take the controller
     570             :  * lock before calling disconnect on a controller qpair.
     571             :  * Functions already holding the controller lock should
     572             :  * call nvme_transport_ctrlr_disconnect_qpair directly.
     573             :  */
     574             : void
     575           0 : nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair)
     576             : {
     577           0 :         struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
     578             : 
     579           0 :         assert(ctrlr != NULL);
     580           0 :         nvme_ctrlr_lock(ctrlr);
     581           0 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     582           0 :         nvme_ctrlr_unlock(ctrlr);
     583           0 : }
     584             : 
     585             : int
     586          14 : spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
     587             : {
     588             :         struct spdk_nvme_ctrlr *ctrlr;
     589             : 
     590          14 :         if (qpair == NULL) {
     591           0 :                 return 0;
     592             :         }
     593             : 
     594          14 :         ctrlr = qpair->ctrlr;
     595             : 
     596          14 :         if (qpair->in_completion_context) {
     597             :                 /*
     598             :                  * There are many cases where it is convenient to delete an io qpair in the context
     599             :                  *  of that qpair's completion routine.  To handle this properly, set a flag here
     600             :                  *  so that the completion routine will perform an actual delete after the context
     601             :                  *  unwinds.
     602             :                  */
     603           0 :                 qpair->delete_after_completion_context = 1;
     604           0 :                 return 0;
     605             :         }
     606             : 
     607          14 :         if (qpair->auth.cb_fn != NULL) {
     608           0 :                 qpair->auth.cb_fn(qpair->auth.cb_ctx, -ECANCELED);
     609           0 :                 qpair->auth.cb_fn = NULL;
     610             :         }
     611             : 
     612          14 :         qpair->destroy_in_progress = 1;
     613             : 
     614          14 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     615             : 
     616          14 :         if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
     617           0 :                 spdk_nvme_poll_group_remove(qpair->poll_group->group, qpair);
     618             :         }
     619             : 
     620             :         /* Do not retry. */
     621          14 :         nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING);
     622             : 
     623             :         /* In the multi-process case, a process may call this function on a foreign
     624             :          * I/O qpair (i.e. one that this process did not create) when that qpairs process
     625             :          * exits unexpectedly.  In that case, we must not try to abort any reqs associated
     626             :          * with that qpair, since the callbacks will also be foreign to this process.
     627             :          */
     628          14 :         if (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr)) {
     629          14 :                 nvme_qpair_abort_all_queued_reqs(qpair);
     630             :         }
     631             : 
     632          14 :         nvme_ctrlr_lock(ctrlr);
     633             : 
     634          14 :         nvme_ctrlr_proc_remove_io_qpair(qpair);
     635             : 
     636          14 :         TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
     637          14 :         spdk_nvme_ctrlr_free_qid(ctrlr, qpair->id);
     638             : 
     639          14 :         nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
     640          14 :         nvme_ctrlr_unlock(ctrlr);
     641          14 :         return 0;
     642             : }
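/*
 * Illustrative sketch of a hypothetical completion callback: freeing a qpair
 * from its own completion routine is legal -- the in_completion_context
 * branch above only sets delete_after_completion_context and defers the real
 * teardown until the completion loop unwinds.
 */
static inline void
example_io_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_qpair *qpair = arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}
}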
     643             : 
     644             : static void
     645           3 : nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
     646             :                 struct spdk_nvme_intel_log_page_directory *log_page_directory)
     647             : {
     648           3 :         if (log_page_directory == NULL) {
     649           0 :                 return;
     650             :         }
     651             : 
     652           3 :         assert(ctrlr->cdata.vid == SPDK_PCI_VID_INTEL);
     653             : 
     654           3 :         ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;
     655             : 
     656           3 :         if (log_page_directory->read_latency_log_len ||
     657           2 :             (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
     658           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
     659             :         }
     660           3 :         if (log_page_directory->write_latency_log_len ||
     661           2 :             (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
     662           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
     663             :         }
     664           3 :         if (log_page_directory->temperature_statistics_log_len) {
     665           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
     666             :         }
     667           3 :         if (log_page_directory->smart_log_len) {
     668           1 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
     669             :         }
     670           3 :         if (log_page_directory->marketing_description_log_len) {
     671           1 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
     672             :         }
     673             : }
     674             : 
     675             : struct intel_log_pages_ctx {
     676             :         struct spdk_nvme_intel_log_page_directory log_page_directory;
     677             :         struct spdk_nvme_ctrlr *ctrlr;
     678             : };
     679             : 
     680             : static void
     681           1 : nvme_ctrlr_set_intel_support_log_pages_done(void *arg, const struct spdk_nvme_cpl *cpl)
     682             : {
     683           1 :         struct intel_log_pages_ctx *ctx = arg;
     684           1 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
     685             : 
     686           1 :         if (!spdk_nvme_cpl_is_error(cpl)) {
     687           1 :                 nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, &ctx->log_page_directory);
     688             :         }
     689             : 
     690           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     691           1 :                              ctrlr->opts.admin_timeout_ms);
     692           1 :         free(ctx);
     693           1 : }
     694             : 
     695             : static int
     696           1 : nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
     697             : {
     698           1 :         int rc = 0;
     699             :         struct intel_log_pages_ctx *ctx;
     700             : 
     701           1 :         ctx = calloc(1, sizeof(*ctx));
     702           1 :         if (!ctx) {
     703           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     704           0 :                                      ctrlr->opts.admin_timeout_ms);
     705           0 :                 return 0;
     706             :         }
     707             : 
     708           1 :         ctx->ctrlr = ctrlr;
     709             : 
     710           1 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
     711           1 :                                               SPDK_NVME_GLOBAL_NS_TAG, &ctx->log_page_directory,
     712             :                                               sizeof(struct spdk_nvme_intel_log_page_directory),
     713             :                                               0, nvme_ctrlr_set_intel_support_log_pages_done, ctx);
     714           1 :         if (rc != 0) {
     715           0 :                 free(ctx);
     716           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     717           0 :                                      ctrlr->opts.admin_timeout_ms);
     718           0 :                 return 0;
     719             :         }
     720             : 
     721           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES,
     722           1 :                              ctrlr->opts.admin_timeout_ms);
     723             : 
     724           1 :         return 0;
     725             : }
     726             : 
     727             : static int
     728           4 : nvme_ctrlr_alloc_ana_log_page(struct spdk_nvme_ctrlr *ctrlr)
     729             : {
     730             :         uint32_t ana_log_page_size;
     731             : 
     732           4 :         ana_log_page_size = sizeof(struct spdk_nvme_ana_page) + ctrlr->cdata.nanagrpid *
     733           4 :                             sizeof(struct spdk_nvme_ana_group_descriptor) + ctrlr->active_ns_count *
     734             :                             sizeof(uint32_t);
     735             : 
      736             :         /* The number of active namespaces may have changed.
      737             :          * Check whether the ANA log page still fits into the existing buffer.
     738             :          */
     739           4 :         if (ana_log_page_size > ctrlr->ana_log_page_size) {
     740             :                 void *new_buffer;
     741             : 
     742           4 :                 if (ctrlr->ana_log_page) {
     743           1 :                         new_buffer = realloc(ctrlr->ana_log_page, ana_log_page_size);
     744             :                 } else {
     745           3 :                         new_buffer = calloc(1, ana_log_page_size);
     746             :                 }
     747             : 
     748           4 :                 if (!new_buffer) {
     749           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "could not allocate ANA log page buffer, size %u\n",
     750             :                                           ana_log_page_size);
     751           0 :                         return -ENXIO;
     752             :                 }
     753             : 
     754           4 :                 ctrlr->ana_log_page = new_buffer;
     755           4 :                 if (ctrlr->copied_ana_desc) {
     756           1 :                         new_buffer = realloc(ctrlr->copied_ana_desc, ana_log_page_size);
     757             :                 } else {
     758           3 :                         new_buffer = calloc(1, ana_log_page_size);
     759             :                 }
     760             : 
     761           4 :                 if (!new_buffer) {
     762           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "could not allocate a buffer to parse ANA descriptor, size %u\n",
     763             :                                           ana_log_page_size);
     764           0 :                         return -ENOMEM;
     765             :                 }
     766             : 
     767           4 :                 ctrlr->copied_ana_desc = new_buffer;
     768           4 :                 ctrlr->ana_log_page_size = ana_log_page_size;
     769             :         }
     770             : 
     771           4 :         return 0;
     772             : }
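/*
 * Worked sizing example (illustrative numbers): with cdata.nanagrpid = 2 ANA
 * groups and active_ns_count = 4, the allocation above is
 *
 *     sizeof(struct spdk_nvme_ana_page)                     fixed header
 *   + 2 * sizeof(struct spdk_nvme_ana_group_descriptor)     group descriptors
 *   + 4 * sizeof(uint32_t)                                  per-NSID entries
 *
 * i.e. the worst case in which every active namespace appears in some
 * group's NSID list.
 */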
     773             : 
     774             : static int
     775           4 : nvme_ctrlr_update_ana_log_page(struct spdk_nvme_ctrlr *ctrlr)
     776             : {
     777             :         struct nvme_completion_poll_status *status;
     778             :         int rc;
     779             : 
     780           4 :         rc = nvme_ctrlr_alloc_ana_log_page(ctrlr);
     781           4 :         if (rc != 0) {
     782           0 :                 return rc;
     783             :         }
     784             : 
     785           4 :         status = calloc(1, sizeof(*status));
     786           4 :         if (status == NULL) {
     787           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
     788           0 :                 return -ENOMEM;
     789             :         }
     790             : 
     791           4 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS,
     792           4 :                                               SPDK_NVME_GLOBAL_NS_TAG, ctrlr->ana_log_page,
     793             :                                               ctrlr->ana_log_page_size, 0,
     794             :                                               nvme_completion_poll_cb, status);
     795           4 :         if (rc != 0) {
     796           0 :                 free(status);
     797           0 :                 return rc;
     798             :         }
     799             : 
     800           4 :         if (nvme_wait_for_completion_robust_lock_timeout(ctrlr->adminq, status, &ctrlr->ctrlr_lock,
     801           4 :                         ctrlr->opts.admin_timeout_ms * 1000)) {
     802           0 :                 if (!status->timed_out) {
     803           0 :                         free(status);
     804             :                 }
     805           0 :                 return -EIO;
     806             :         }
     807             : 
     808           4 :         free(status);
     809           4 :         return 0;
     810             : }
     811             : 
     812             : static int
     813           5 : nvme_ctrlr_update_ns_ana_states(const struct spdk_nvme_ana_group_descriptor *desc,
     814             :                                 void *cb_arg)
     815             : {
     816           5 :         struct spdk_nvme_ctrlr *ctrlr = cb_arg;
     817             :         struct spdk_nvme_ns *ns;
     818             :         uint32_t i, nsid;
     819             : 
     820          14 :         for (i = 0; i < desc->num_of_nsid; i++) {
     821           9 :                 nsid = desc->nsid[i];
     822           9 :                 if (nsid == 0 || nsid > ctrlr->cdata.nn) {
     823           0 :                         continue;
     824             :                 }
     825             : 
     826           9 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
     827           9 :                 assert(ns != NULL);
     828             : 
     829           9 :                 ns->ana_group_id = desc->ana_group_id;
     830           9 :                 ns->ana_state = desc->ana_state;
     831             :         }
     832             : 
     833           5 :         return 0;
     834             : }
     835             : 
     836             : int
     837           4 : nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
     838             :                               spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg)
     839             : {
     840             :         struct spdk_nvme_ana_group_descriptor *copied_desc;
     841             :         uint8_t *orig_desc;
     842             :         uint32_t i, desc_size, copy_len;
     843           4 :         int rc = 0;
     844             : 
     845           4 :         if (ctrlr->ana_log_page == NULL) {
     846           0 :                 return -EINVAL;
     847             :         }
     848             : 
     849           4 :         copied_desc = ctrlr->copied_ana_desc;
     850             : 
     851           4 :         orig_desc = (uint8_t *)ctrlr->ana_log_page + sizeof(struct spdk_nvme_ana_page);
     852           4 :         copy_len = ctrlr->ana_log_page_size - sizeof(struct spdk_nvme_ana_page);
     853             : 
     854           9 :         for (i = 0; i < ctrlr->ana_log_page->num_ana_group_desc; i++) {
     855           5 :                 memcpy(copied_desc, orig_desc, copy_len);
     856             : 
     857           5 :                 rc = cb_fn(copied_desc, cb_arg);
     858           5 :                 if (rc != 0) {
     859           0 :                         break;
     860             :                 }
     861             : 
     862           5 :                 desc_size = sizeof(struct spdk_nvme_ana_group_descriptor) +
     863           5 :                             copied_desc->num_of_nsid * sizeof(uint32_t);
     864           5 :                 orig_desc += desc_size;
     865           5 :                 copy_len -= desc_size;
     866             :         }
     867             : 
     868           4 :         return rc;
     869             : }
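/*
 * Illustrative sketch of a parse callback (hypothetical; the in-file consumer
 * is nvme_ctrlr_update_ns_ana_states() above): each invocation receives one
 * descriptor copied into the aligned copied_ana_desc buffer, and a non-zero
 * return stops the iteration early.
 */
static inline int
example_count_inaccessible(const struct spdk_nvme_ana_group_descriptor *desc, void *cb_arg)
{
	uint32_t *count = cb_arg;

	if (desc->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE) {
		(*count)++;
	}
	return 0;
}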
     870             : 
     871             : static int
     872          16 : nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
     873             : {
     874          16 :         int     rc = 0;
     875             : 
     876          16 :         memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
     877             :         /* Mandatory pages */
     878          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
     879          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
     880          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
     881          16 :         if (ctrlr->cdata.lpa.celp) {
     882           1 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
     883             :         }
     884             : 
     885          16 :         if (ctrlr->cdata.cmic.ana_reporting) {
     886           2 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS] = true;
     887           2 :                 if (!ctrlr->opts.disable_read_ana_log_page) {
     888           2 :                         rc = nvme_ctrlr_update_ana_log_page(ctrlr);
     889           2 :                         if (rc == 0) {
     890           2 :                                 nvme_ctrlr_parse_ana_log_page(ctrlr, nvme_ctrlr_update_ns_ana_states,
     891             :                                                               ctrlr);
     892             :                         }
     893             :                 }
     894             :         }
     895             : 
     896          16 :         if (ctrlr->cdata.ctratt.bits.fdps) {
     897           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_CONFIGURATIONS] = true;
     898           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_RECLAIM_UNIT_HANDLE_USAGE] = true;
     899           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_STATISTICS] = true;
     900           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_EVENTS] = true;
     901             :         }
     902             : 
     903          16 :         if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL &&
     904           1 :             ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE &&
     905           1 :             !(ctrlr->quirks & NVME_INTEL_QUIRK_NO_LOG_PAGES)) {
     906           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES,
     907           1 :                                      ctrlr->opts.admin_timeout_ms);
     908             : 
     909             :         } else {
     910          15 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     911          15 :                                      ctrlr->opts.admin_timeout_ms);
     912             : 
     913             :         }
     914             : 
     915          16 :         return rc;
     916             : }
     917             : 
     918             : static void
     919           1 : nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
     920             : {
     921           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
     922           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
     923           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
     924           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
     925           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
     926           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
     927           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
     928           1 : }
     929             : 
     930             : static void
     931          18 : nvme_ctrlr_set_arbitration_feature(struct spdk_nvme_ctrlr *ctrlr)
     932             : {
     933             :         uint32_t cdw11;
     934             :         struct nvme_completion_poll_status *status;
     935             : 
     936          18 :         if (ctrlr->opts.arbitration_burst == 0) {
     937          16 :                 return;
     938             :         }
     939             : 
     940           2 :         if (ctrlr->opts.arbitration_burst > 7) {
      941           1 :                 NVME_CTRLR_WARNLOG(ctrlr, "Valid arbitration burst values are 0-7\n");
     942           1 :                 return;
     943             :         }
     944             : 
     945           1 :         status = calloc(1, sizeof(*status));
     946           1 :         if (!status) {
     947           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
     948           0 :                 return;
     949             :         }
     950             : 
     951           1 :         cdw11 = ctrlr->opts.arbitration_burst;
     952             : 
     953           1 :         if (spdk_nvme_ctrlr_get_flags(ctrlr) & SPDK_NVME_CTRLR_WRR_SUPPORTED) {
     954           1 :                 cdw11 |= (uint32_t)ctrlr->opts.low_priority_weight << 8;
     955           1 :                 cdw11 |= (uint32_t)ctrlr->opts.medium_priority_weight << 16;
     956           1 :                 cdw11 |= (uint32_t)ctrlr->opts.high_priority_weight << 24;
     957             :         }
     958             : 
     959           1 :         if (spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ARBITRATION,
     960             :                                             cdw11, 0, NULL, 0,
     961             :                                             nvme_completion_poll_cb, status) < 0) {
     962           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set arbitration feature failed\n");
     963           0 :                 free(status);
     964           0 :                 return;
     965             :         }
     966             : 
     967           1 :         if (nvme_wait_for_completion_timeout(ctrlr->adminq, status,
     968           1 :                                              ctrlr->opts.admin_timeout_ms * 1000)) {
     969           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Timeout to set arbitration feature\n");
     970             :         }
     971             : 
     972           1 :         if (!status->timed_out) {
     973           1 :                 free(status);
     974             :         }
     975             : }
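/*
 * Illustrative layout of the CDW11 value assembled above (NVMe Arbitration
 * feature): Arbitration Burst in bits 2:0 and, when weighted round robin is
 * supported, LPW in 15:8, MPW in 23:16, HPW in 31:24:
 *
 *     cdw11 = AB | (LPW << 8) | (MPW << 16) | (HPW << 24);
 */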
     976             : 
     977             : static void
     978          16 : nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
     979             : {
     980          16 :         memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
     981             :         /* Mandatory features */
     982          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
     983          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
     984          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
     985          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
     986          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
     987          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
     988          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
     989          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
     990          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
     991             :         /* Optional features */
     992          16 :         if (ctrlr->cdata.vwc.present) {
     993           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
     994             :         }
     995          16 :         if (ctrlr->cdata.apsta.supported) {
     996           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
     997             :         }
     998          16 :         if (ctrlr->cdata.hmpre) {
     999           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
    1000             :         }
    1001          16 :         if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
    1002           1 :                 nvme_ctrlr_set_intel_supported_features(ctrlr);
    1003             :         }
    1004             : 
    1005          16 :         nvme_ctrlr_set_arbitration_feature(ctrlr);
    1006          16 : }
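
/*
 * Usage sketch (editorial example): the feature_supported[] table filled in
 * above is what backs the public spdk_nvme_ctrlr_is_feature_supported()
 * query, e.g.:
 */
static void
example_check_volatile_write_cache(struct spdk_nvme_ctrlr *ctrlr)
{
	if (spdk_nvme_ctrlr_is_feature_supported(ctrlr, SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE)) {
		/* A volatile write cache is present; flushes may be needed for durability. */
	}
}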
    1007             : 
    1008             : static void
    1009           1 : nvme_ctrlr_set_host_feature_done(void *arg, const struct spdk_nvme_cpl *cpl)
    1010             : {
    1011           1 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    1012             : 
    1013           1 :         spdk_free(ctrlr->tmp_ptr);
    1014           1 :         ctrlr->tmp_ptr = NULL;
    1015             : 
    1016           1 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1017           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set host behavior support feature failed: SC %x SCT %x\n",
    1018             :                                   cpl->status.sc, cpl->status.sct);
    1019           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1020           0 :                 return;
    1021             :         }
    1022             : 
    1023           1 :         ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT] = true;
    1024             : 
    1025           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
    1026           1 :                              ctrlr->opts.admin_timeout_ms);
    1027             : }
    1028             : 
     1029             : /* We no longer want to do this as a synchronous operation.
     1030             :  * We set the Host Behavior Support feature asynchronously, in different states.
    1031             :  */
    1032             : static int
    1033          16 : nvme_ctrlr_set_host_feature(struct spdk_nvme_ctrlr *ctrlr)
    1034             : {
    1035             :         struct spdk_nvme_host_behavior *host;
    1036             :         int rc;
    1037             : 
    1038          16 :         if (!ctrlr->cdata.ctratt.bits.elbas) {
    1039          15 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
    1040          15 :                                      ctrlr->opts.admin_timeout_ms);
    1041          15 :                 return 0;
    1042             :         }
    1043             : 
    1044           1 :         ctrlr->tmp_ptr = spdk_dma_zmalloc(sizeof(struct spdk_nvme_host_behavior), 4096, NULL);
    1045           1 :         if (!ctrlr->tmp_ptr) {
    1046           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate host behavior support data\n");
    1047           0 :                 rc = -ENOMEM;
    1048           0 :                 goto error;
    1049             :         }
    1050             : 
    1051           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE,
    1052           1 :                              ctrlr->opts.admin_timeout_ms);
    1053             : 
    1054           1 :         host = ctrlr->tmp_ptr;
    1055             : 
    1056           1 :         host->lbafee = 1;
    1057             : 
    1058           1 :         rc = spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT,
    1059             :                                              0, 0, host, sizeof(struct spdk_nvme_host_behavior),
    1060             :                                              nvme_ctrlr_set_host_feature_done, ctrlr);
    1061           1 :         if (rc != 0) {
    1062           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set host behavior support feature failed: %d\n", rc);
    1063           0 :                 goto error;
    1064             :         }
    1065             : 
    1066           1 :         return 0;
    1067             : 
    1068           0 : error:
    1069           0 :         spdk_free(ctrlr->tmp_ptr);
    1070           0 :         ctrlr->tmp_ptr = NULL;
    1071             : 
    1072           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1073           0 :         return rc;
    1074             : }
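
/*
 * Outline of the asynchronous pattern used above (editorial note):
 *   1. allocate any DMA buffer the command needs (ctrlr->tmp_ptr here),
 *   2. move the state machine to a WAIT_FOR_* state with an admin timeout,
 *   3. submit the admin command with a completion callback,
 *   4. the callback frees the buffer and advances to the next state, or
 *      to NVME_CTRLR_STATE_ERROR on failure.
 * nvme_ctrlr_process_init() drives these states by polling the admin queue.
 */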
    1075             : 
    1076             : bool
    1077           0 : spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
    1078             : {
    1079           0 :         return ctrlr->is_failed;
    1080             : }
    1081             : 
    1082             : void
    1083           1 : nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
    1084             : {
    1085             :         /*
     1086             :          * Set the flag here and leave the actual failing of the qpairs to
    1087             :          * spdk_nvme_qpair_process_completions().
    1088             :          */
    1089           1 :         if (hot_remove) {
    1090           0 :                 ctrlr->is_removed = true;
    1091             :         }
    1092             : 
    1093           1 :         if (ctrlr->is_failed) {
    1094           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "already in failed state\n");
    1095           0 :                 return;
    1096             :         }
    1097             : 
    1098           1 :         if (ctrlr->is_disconnecting) {
    1099           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "already disconnecting\n");
    1100           0 :                 return;
    1101             :         }
    1102             : 
    1103           1 :         ctrlr->is_failed = true;
    1104           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1105           1 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    1106           1 :         NVME_CTRLR_ERRLOG(ctrlr, "in failed state.\n");
    1107             : }
    1108             : 
    1109             : /**
    1110             :  * This public API function will try to take the controller lock.
    1111             :  * Any private functions being called from a thread already holding
    1112             :  * the ctrlr lock should call nvme_ctrlr_fail directly.
    1113             :  */
    1114             : void
    1115           0 : spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
    1116             : {
    1117           0 :         nvme_ctrlr_lock(ctrlr);
    1118           0 :         nvme_ctrlr_fail(ctrlr, false);
    1119           0 :         nvme_ctrlr_unlock(ctrlr);
    1120           0 : }
    1121             : 
    1122             : static void
    1123          39 : nvme_ctrlr_shutdown_set_cc_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1124             : {
    1125          39 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1126          39 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    1127             : 
    1128          39 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1129           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write CC.SHN\n");
    1130           0 :                 ctx->shutdown_complete = true;
    1131           0 :                 return;
    1132             :         }
    1133             : 
    1134          39 :         if (ctrlr->opts.no_shn_notification) {
    1135           0 :                 ctx->shutdown_complete = true;
    1136           0 :                 return;
    1137             :         }
    1138             : 
    1139             :         /*
     1140             :          * The NVMe specification defines RTD3E to be the time from
     1141             :          *  setting SHN = 1 until the controller sets SHST = 10b.
    1142             :          * If the device doesn't report RTD3 entry latency, or if it
    1143             :          *  reports RTD3 entry latency less than 10 seconds, pick
    1144             :          *  10 seconds as a reasonable amount of time to
    1145             :          *  wait before proceeding.
    1146             :          */
    1147          39 :         NVME_CTRLR_DEBUGLOG(ctrlr, "RTD3E = %" PRIu32 " us\n", ctrlr->cdata.rtd3e);
    1148          39 :         ctx->shutdown_timeout_ms = SPDK_CEIL_DIV(ctrlr->cdata.rtd3e, 1000);
    1149          39 :         ctx->shutdown_timeout_ms = spdk_max(ctx->shutdown_timeout_ms, 10000);
    1150          39 :         NVME_CTRLR_DEBUGLOG(ctrlr, "shutdown timeout = %" PRIu32 " ms\n", ctx->shutdown_timeout_ms);
    1151             : 
    1152          39 :         ctx->shutdown_start_tsc = spdk_get_ticks();
    1153          39 :         ctx->state = NVME_CTRLR_DETACH_CHECK_CSTS;
    1154             : }
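
/*
 * Worked example for the timeout above (editorial note): RTD3E =
 * 2,500,000 us gives SPDK_CEIL_DIV(2500000, 1000) = 2500 ms, which
 * spdk_max() raises to the 10000 ms floor; RTD3E = 30,000,000 us gives
 * 30000 ms and is used as-is.
 */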
    1155             : 
    1156             : static void
    1157          39 : nvme_ctrlr_shutdown_get_cc_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1158             : {
    1159          39 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1160          39 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    1161             :         union spdk_nvme_cc_register cc;
    1162             :         int rc;
    1163             : 
    1164          39 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1165           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    1166           0 :                 ctx->shutdown_complete = true;
    1167           0 :                 return;
    1168             :         }
    1169             : 
    1170          39 :         assert(value <= UINT32_MAX);
    1171          39 :         cc.raw = (uint32_t)value;
    1172             : 
    1173          39 :         if (ctrlr->opts.no_shn_notification) {
     1174           0 :                 NVME_CTRLR_INFOLOG(ctrlr, "Disabling SSD without shutdown notification\n");
    1175           0 :                 if (cc.bits.en == 0) {
    1176           0 :                         ctx->shutdown_complete = true;
    1177           0 :                         return;
    1178             :                 }
    1179             : 
    1180           0 :                 cc.bits.en = 0;
    1181             :         } else {
    1182          39 :                 cc.bits.shn = SPDK_NVME_SHN_NORMAL;
    1183             :         }
    1184             : 
    1185          39 :         rc = nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_shutdown_set_cc_done, ctx);
    1186          39 :         if (rc != 0) {
    1187           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write CC.SHN\n");
    1188           0 :                 ctx->shutdown_complete = true;
    1189             :         }
    1190             : }
    1191             : 
    1192             : static void
    1193          47 : nvme_ctrlr_shutdown_async(struct spdk_nvme_ctrlr *ctrlr,
    1194             :                           struct nvme_ctrlr_detach_ctx *ctx)
    1195             : {
    1196             :         int rc;
    1197             : 
    1198          47 :         if (ctrlr->is_removed) {
    1199           0 :                 ctx->shutdown_complete = true;
    1200           0 :                 return;
    1201             :         }
    1202             : 
    1203          47 :         if (ctrlr->adminq == NULL ||
    1204          40 :             ctrlr->adminq->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
    1205           8 :                 NVME_CTRLR_INFOLOG(ctrlr, "Adminq is not connected.\n");
    1206           8 :                 ctx->shutdown_complete = true;
    1207           8 :                 return;
    1208             :         }
    1209             : 
    1210          39 :         ctx->state = NVME_CTRLR_DETACH_SET_CC;
    1211          39 :         rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_shutdown_get_cc_done, ctx);
    1212          39 :         if (rc != 0) {
    1213           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    1214           0 :                 ctx->shutdown_complete = true;
    1215             :         }
    1216             : }
    1217             : 
    1218             : static void
    1219          39 : nvme_ctrlr_shutdown_get_csts_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1220             : {
    1221          39 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1222             : 
    1223          39 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1224           0 :                 NVME_CTRLR_ERRLOG(ctx->ctrlr, "Failed to read the CSTS register\n");
    1225           0 :                 ctx->shutdown_complete = true;
    1226           0 :                 return;
    1227             :         }
    1228             : 
    1229          39 :         assert(value <= UINT32_MAX);
    1230          39 :         ctx->csts.raw = (uint32_t)value;
    1231          39 :         ctx->state = NVME_CTRLR_DETACH_GET_CSTS_DONE;
    1232             : }
    1233             : 
    1234             : static int
    1235          78 : nvme_ctrlr_shutdown_poll_async(struct spdk_nvme_ctrlr *ctrlr,
    1236             :                                struct nvme_ctrlr_detach_ctx *ctx)
    1237             : {
    1238             :         union spdk_nvme_csts_register   csts;
    1239             :         uint32_t                        ms_waited;
    1240             : 
    1241          78 :         switch (ctx->state) {
    1242           0 :         case NVME_CTRLR_DETACH_SET_CC:
    1243             :         case NVME_CTRLR_DETACH_GET_CSTS:
    1244             :                 /* We're still waiting for the register operation to complete */
    1245           0 :                 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    1246           0 :                 return -EAGAIN;
    1247             : 
    1248          39 :         case NVME_CTRLR_DETACH_CHECK_CSTS:
    1249          39 :                 ctx->state = NVME_CTRLR_DETACH_GET_CSTS;
    1250          39 :                 if (nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_shutdown_get_csts_done, ctx)) {
    1251           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    1252           0 :                         return -EIO;
    1253             :                 }
    1254          39 :                 return -EAGAIN;
    1255             : 
    1256          39 :         case NVME_CTRLR_DETACH_GET_CSTS_DONE:
    1257          39 :                 ctx->state = NVME_CTRLR_DETACH_CHECK_CSTS;
    1258          39 :                 break;
    1259             : 
    1260           0 :         default:
    1261           0 :                 assert(0 && "Should never happen");
    1262             :                 return -EINVAL;
    1263             :         }
    1264             : 
    1265          39 :         ms_waited = (spdk_get_ticks() - ctx->shutdown_start_tsc) * 1000 / spdk_get_ticks_hz();
    1266          39 :         csts.raw = ctx->csts.raw;
    1267             : 
    1268          39 :         if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
    1269          39 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "shutdown complete in %u milliseconds\n", ms_waited);
    1270          39 :                 return 0;
    1271             :         }
    1272             : 
    1273           0 :         if (ms_waited < ctx->shutdown_timeout_ms) {
    1274           0 :                 return -EAGAIN;
    1275             :         }
    1276             : 
     1277           0 :         NVME_CTRLR_ERRLOG(ctrlr, "did not shut down within %u milliseconds\n",
    1278             :                           ctx->shutdown_timeout_ms);
    1279           0 :         if (ctrlr->quirks & NVME_QUIRK_SHST_COMPLETE) {
     1280           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "likely due to shutdown handling in the VMware emulated NVMe SSD\n");
    1281             :         }
    1282             : 
    1283           0 :         return 0;
    1284             : }
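
/*
 * Polling sketch (editorial example, not driver code): how a detach path
 * could drive the two-step shutdown above.  ctx is assumed to be a
 * zero-initialized nvme_ctrlr_detach_ctx with ctx->ctrlr set.
 */
static void
example_shutdown_sync(struct spdk_nvme_ctrlr *ctrlr, struct nvme_ctrlr_detach_ctx *ctx)
{
	nvme_ctrlr_shutdown_async(ctrlr, ctx);

	/* -EAGAIN means "keep polling"; 0 or -EIO ends the wait. */
	while (!ctx->shutdown_complete &&
	       nvme_ctrlr_shutdown_poll_async(ctrlr, ctx) == -EAGAIN) {
	}
}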
    1285             : 
    1286             : static inline uint64_t
    1287         509 : nvme_ctrlr_get_ready_timeout(struct spdk_nvme_ctrlr *ctrlr)
    1288             : {
    1289         509 :         return ctrlr->cap.bits.to * 500;
    1290             : }
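
/*
 * Editorial note: CAP.TO is expressed in 500 ms units, so e.g.
 * CAP.TO = 30 yields a 30 * 500 = 15000 ms ready timeout.
 */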
    1291             : 
    1292             : static void
    1293          14 : nvme_ctrlr_set_cc_en_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1294             : {
    1295          14 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    1296             : 
    1297          14 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1298           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to set the CC register\n");
    1299           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1300           0 :                 return;
    1301             :         }
    1302             : 
    1303          14 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    1304             :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    1305             : }
    1306             : 
    1307             : static int
    1308          21 : nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
    1309             : {
    1310             :         union spdk_nvme_cc_register     cc;
    1311             :         int                             rc;
    1312             : 
    1313          21 :         rc = nvme_transport_ctrlr_enable(ctrlr);
    1314          21 :         if (rc != 0) {
    1315           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "transport ctrlr_enable failed\n");
    1316           0 :                 return rc;
    1317             :         }
    1318             : 
    1319          21 :         cc.raw = ctrlr->process_init_cc.raw;
    1320          21 :         if (cc.bits.en != 0) {
    1321           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "called with CC.EN = 1\n");
    1322           0 :                 return -EINVAL;
    1323             :         }
    1324             : 
    1325          21 :         cc.bits.en = 1;
    1326          21 :         cc.bits.css = 0;
    1327          21 :         cc.bits.shn = 0;
    1328          21 :         cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
    1329          21 :         cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
    1330             : 
    1331             :         /* Page size is 2 ^ (12 + mps). */
    1332          21 :         cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;
    1333             : 
    1334             :         /*
    1335             :          * Since NVMe 1.0, a controller should have at least one bit set in CAP.CSS.
    1336             :          * A controller that does not have any bit set in CAP.CSS is not spec compliant.
    1337             :          * Try to support such a controller regardless.
    1338             :          */
    1339          21 :         if (ctrlr->cap.bits.css == 0) {
    1340          21 :                 NVME_CTRLR_INFOLOG(ctrlr, "Drive reports no command sets supported. Assuming NVM is supported.\n");
    1341          21 :                 ctrlr->cap.bits.css = SPDK_NVME_CAP_CSS_NVM;
    1342             :         }
    1343             : 
    1344             :         /*
    1345             :          * If the user did not explicitly request a command set, or supplied a value larger than
    1346             :          * what can be saved in CC.CSS, use the most reasonable default.
    1347             :          */
    1348          21 :         if (ctrlr->opts.command_set >= CHAR_BIT) {
    1349           0 :                 if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS) {
    1350           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_IOCS;
    1351           0 :                 } else if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_NVM) {
    1352           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1353           0 :                 } else if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_NOIO) {
    1354             :                         /* Technically we should respond with CC_CSS_NOIO in
    1355             :                          * this case, but we use NVM instead to work around
    1356             :                          * buggy targets and to match Linux driver behavior.
    1357             :                          */
    1358           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1359             :                 } else {
    1360             :                         /* Invalid supported bits detected, falling back to NVM. */
    1361           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1362             :                 }
    1363             :         }
    1364             : 
    1365             :         /* Verify that the selected command set is supported by the controller. */
    1366          21 :         if (!(ctrlr->cap.bits.css & (1u << ctrlr->opts.command_set))) {
    1367           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Requested I/O command set %u but supported mask is 0x%x\n",
    1368             :                                     ctrlr->opts.command_set, ctrlr->cap.bits.css);
    1369           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Falling back to NVM. Assuming NVM is supported.\n");
    1370           0 :                 ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1371             :         }
    1372             : 
    1373          21 :         cc.bits.css = ctrlr->opts.command_set;
    1374             : 
    1375          21 :         switch (ctrlr->opts.arb_mechanism) {
    1376          10 :         case SPDK_NVME_CC_AMS_RR:
    1377          10 :                 break;
    1378           4 :         case SPDK_NVME_CC_AMS_WRR:
    1379           4 :                 if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
    1380           2 :                         break;
    1381             :                 }
    1382           2 :                 return -EINVAL;
    1383           4 :         case SPDK_NVME_CC_AMS_VS:
    1384           4 :                 if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
    1385           2 :                         break;
    1386             :                 }
    1387           2 :                 return -EINVAL;
    1388           3 :         default:
    1389           3 :                 return -EINVAL;
    1390             :         }
    1391             : 
    1392          14 :         cc.bits.ams = ctrlr->opts.arb_mechanism;
    1393          14 :         ctrlr->process_init_cc.raw = cc.raw;
    1394             : 
    1395          14 :         if (nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_set_cc_en_done, ctrlr)) {
    1396           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_cc() failed\n");
    1397           0 :                 return -EIO;
    1398             :         }
    1399             : 
    1400          14 :         return 0;
    1401             : }
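
/*
 * Worked example for the CC fields programmed above (editorial note):
 * with the default 4 KiB page size, spdk_u32log2(4096) = 12 gives
 * CC.MPS = 0, i.e. a page size of 2^(12 + 0) = 4096 bytes.  CC.IOSQES = 6
 * and CC.IOCQES = 4 encode the 64-byte SQ and 16-byte CQ entry sizes as
 * powers of two.
 */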
    1402             : 
    1403             : static const char *
    1404           1 : nvme_ctrlr_state_string(enum nvme_ctrlr_state state)
    1405             : {
    1406           1 :         switch (state) {
    1407           0 :         case NVME_CTRLR_STATE_INIT_DELAY:
    1408           0 :                 return "delay init";
    1409           0 :         case NVME_CTRLR_STATE_CONNECT_ADMINQ:
    1410           0 :                 return "connect adminq";
    1411           0 :         case NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ:
    1412           0 :                 return "wait for connect adminq";
    1413           0 :         case NVME_CTRLR_STATE_READ_VS:
    1414           0 :                 return "read vs";
    1415           0 :         case NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS:
    1416           0 :                 return "read vs wait for vs";
    1417           0 :         case NVME_CTRLR_STATE_READ_CAP:
    1418           0 :                 return "read cap";
    1419           0 :         case NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP:
    1420           0 :                 return "read cap wait for cap";
    1421           0 :         case NVME_CTRLR_STATE_CHECK_EN:
    1422           0 :                 return "check en";
    1423           0 :         case NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC:
    1424           0 :                 return "check en wait for cc";
    1425           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
    1426           0 :                 return "disable and wait for CSTS.RDY = 1";
    1427           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    1428           0 :                 return "disable and wait for CSTS.RDY = 1 reg";
    1429           0 :         case NVME_CTRLR_STATE_SET_EN_0:
    1430           0 :                 return "set CC.EN = 0";
    1431           0 :         case NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC:
    1432           0 :                 return "set CC.EN = 0 wait for cc";
    1433           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
    1434           0 :                 return "disable and wait for CSTS.RDY = 0";
    1435           0 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS:
    1436           0 :                 return "disable and wait for CSTS.RDY = 0 reg";
    1437           0 :         case NVME_CTRLR_STATE_DISABLED:
    1438           0 :                 return "controller is disabled";
    1439           0 :         case NVME_CTRLR_STATE_ENABLE:
    1440           0 :                 return "enable controller by writing CC.EN = 1";
    1441           0 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC:
    1442           0 :                 return "enable controller by writing CC.EN = 1 reg";
    1443           0 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
    1444           0 :                 return "wait for CSTS.RDY = 1";
    1445           0 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    1446           0 :                 return "wait for CSTS.RDY = 1 reg";
    1447           0 :         case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
    1448           0 :                 return "reset admin queue";
    1449           0 :         case NVME_CTRLR_STATE_IDENTIFY:
    1450           0 :                 return "identify controller";
    1451           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
    1452           0 :                 return "wait for identify controller";
    1453           0 :         case NVME_CTRLR_STATE_CONFIGURE_AER:
    1454           0 :                 return "configure AER";
    1455           0 :         case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
    1456           0 :                 return "wait for configure aer";
    1457           0 :         case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
    1458           0 :                 return "set keep alive timeout";
    1459           0 :         case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
    1460           0 :                 return "wait for set keep alive timeout";
    1461           0 :         case NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC:
    1462           0 :                 return "identify controller iocs specific";
    1463           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC:
    1464           0 :                 return "wait for identify controller iocs specific";
    1465           0 :         case NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG:
    1466           0 :                 return "get zns cmd and effects log page";
    1467           0 :         case NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG:
    1468           0 :                 return "wait for get zns cmd and effects log page";
    1469           0 :         case NVME_CTRLR_STATE_SET_NUM_QUEUES:
    1470           0 :                 return "set number of queues";
    1471           0 :         case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
    1472           0 :                 return "wait for set number of queues";
    1473           0 :         case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
    1474           0 :                 return "identify active ns";
    1475           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
    1476           0 :                 return "wait for identify active ns";
    1477           0 :         case NVME_CTRLR_STATE_IDENTIFY_NS:
    1478           0 :                 return "identify ns";
    1479           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
    1480           0 :                 return "wait for identify ns";
    1481           0 :         case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
    1482           0 :                 return "identify namespace id descriptors";
    1483           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
    1484           0 :                 return "wait for identify namespace id descriptors";
    1485           0 :         case NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC:
    1486           0 :                 return "identify ns iocs specific";
    1487           0 :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC:
    1488           0 :                 return "wait for identify ns iocs specific";
    1489           0 :         case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
    1490           0 :                 return "set supported log pages";
    1491           0 :         case NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES:
    1492           0 :                 return "set supported INTEL log pages";
    1493           0 :         case NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES:
    1494           0 :                 return "wait for supported INTEL log pages";
    1495           0 :         case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
    1496           0 :                 return "set supported features";
    1497           0 :         case NVME_CTRLR_STATE_SET_HOST_FEATURE:
    1498           0 :                 return "set host behavior support feature";
    1499           0 :         case NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE:
    1500           0 :                 return "wait for set host behavior support feature";
    1501           0 :         case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
    1502           0 :                 return "set doorbell buffer config";
    1503           0 :         case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
    1504           0 :                 return "wait for doorbell buffer config";
    1505           0 :         case NVME_CTRLR_STATE_SET_HOST_ID:
    1506           0 :                 return "set host ID";
    1507           0 :         case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
    1508           0 :                 return "wait for set host ID";
    1509           0 :         case NVME_CTRLR_STATE_TRANSPORT_READY:
    1510           0 :                 return "transport ready";
    1511           0 :         case NVME_CTRLR_STATE_READY:
    1512           0 :                 return "ready";
    1513           1 :         case NVME_CTRLR_STATE_ERROR:
    1514           1 :                 return "error";
    1515           0 :         case NVME_CTRLR_STATE_DISCONNECTED:
    1516           0 :                 return "disconnected";
    1517             :         }
    1518           0 :         return "unknown";
     1519             : }
    1520             : 
    1521             : static void
    1522         732 : _nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1523             :                       uint64_t timeout_in_ms, bool quiet)
    1524             : {
    1525             :         uint64_t ticks_per_ms, timeout_in_ticks, now_ticks;
    1526             : 
    1527         732 :         ctrlr->state = state;
    1528         732 :         if (timeout_in_ms == NVME_TIMEOUT_KEEP_EXISTING) {
    1529          33 :                 if (!quiet) {
    1530           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (keeping existing timeout)\n",
    1531             :                                             nvme_ctrlr_state_string(ctrlr->state));
    1532             :                 }
    1533          33 :                 return;
    1534             :         }
    1535             : 
    1536         699 :         if (timeout_in_ms == NVME_TIMEOUT_INFINITE) {
    1537         697 :                 goto inf;
    1538             :         }
    1539             : 
    1540           2 :         ticks_per_ms = spdk_get_ticks_hz() / 1000;
    1541           2 :         if (timeout_in_ms > UINT64_MAX / ticks_per_ms) {
    1542           0 :                 NVME_CTRLR_ERRLOG(ctrlr,
    1543             :                                   "Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
    1544           0 :                 goto inf;
    1545             :         }
    1546             : 
    1547           2 :         now_ticks = spdk_get_ticks();
    1548           2 :         timeout_in_ticks = timeout_in_ms * ticks_per_ms;
    1549           2 :         if (timeout_in_ticks > UINT64_MAX - now_ticks) {
    1550           1 :                 NVME_CTRLR_ERRLOG(ctrlr,
    1551             :                                   "Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
    1552           1 :                 goto inf;
    1553             :         }
    1554             : 
    1555           1 :         ctrlr->state_timeout_tsc = timeout_in_ticks + now_ticks;
    1556           1 :         if (!quiet) {
    1557           1 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (timeout %" PRIu64 " ms)\n",
    1558             :                                     nvme_ctrlr_state_string(ctrlr->state), timeout_in_ms);
    1559             :         }
    1560           1 :         return;
    1561         698 : inf:
    1562         698 :         if (!quiet) {
    1563         698 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (no timeout)\n",
    1564             :                                     nvme_ctrlr_state_string(ctrlr->state));
    1565             :         }
    1566         698 :         ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
    1567             : }
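
/*
 * Worked example for the deadline math above (editorial note): with a
 * 2 GHz tick source, ticks_per_ms = 2,000,000, so a 15000 ms timeout
 * becomes 3 * 10^10 ticks added to the current tick count.  The two
 * guards catch the multiplication and the addition overflowing
 * uint64_t, falling back to an infinite timeout in either case.
 */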
    1568             : 
    1569             : static void
    1570         699 : nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1571             :                      uint64_t timeout_in_ms)
    1572             : {
    1573         699 :         _nvme_ctrlr_set_state(ctrlr, state, timeout_in_ms, false);
    1574         699 : }
    1575             : 
    1576             : static void
    1577          33 : nvme_ctrlr_set_state_quiet(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1578             :                            uint64_t timeout_in_ms)
    1579             : {
    1580          33 :         _nvme_ctrlr_set_state(ctrlr, state, timeout_in_ms, true);
    1581          33 : }
    1582             : 
    1583             : static void
    1584          48 : nvme_ctrlr_free_zns_specific_data(struct spdk_nvme_ctrlr *ctrlr)
    1585             : {
    1586          48 :         spdk_free(ctrlr->cdata_zns);
    1587          48 :         ctrlr->cdata_zns = NULL;
    1588          48 : }
    1589             : 
    1590             : static void
    1591          48 : nvme_ctrlr_free_iocs_specific_data(struct spdk_nvme_ctrlr *ctrlr)
    1592             : {
    1593          48 :         nvme_ctrlr_free_zns_specific_data(ctrlr);
    1594          48 : }
    1595             : 
    1596             : static void
    1597          49 : nvme_ctrlr_free_doorbell_buffer(struct spdk_nvme_ctrlr *ctrlr)
    1598             : {
    1599          49 :         if (ctrlr->shadow_doorbell) {
    1600           1 :                 spdk_free(ctrlr->shadow_doorbell);
    1601           1 :                 ctrlr->shadow_doorbell = NULL;
    1602             :         }
    1603             : 
    1604          49 :         if (ctrlr->eventidx) {
    1605           1 :                 spdk_free(ctrlr->eventidx);
    1606           1 :                 ctrlr->eventidx = NULL;
    1607             :         }
    1608          49 : }
    1609             : 
    1610             : static void
    1611           1 : nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
    1612             : {
    1613           1 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    1614             : 
    1615           1 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1616           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "Doorbell buffer config failed\n");
    1617             :         } else {
    1618           1 :                 NVME_CTRLR_INFOLOG(ctrlr, "Doorbell buffer config enabled\n");
    1619             :         }
    1620           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1621           1 :                              ctrlr->opts.admin_timeout_ms);
    1622           1 : }
    1623             : 
    1624             : static int
    1625          15 : nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
    1626             : {
    1627          15 :         int rc = 0;
    1628          15 :         uint64_t prp1, prp2, len;
    1629             : 
    1630          15 :         if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
    1631          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1632          14 :                                      ctrlr->opts.admin_timeout_ms);
    1633          14 :                 return 0;
    1634             :         }
    1635             : 
    1636           1 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    1637           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1638           0 :                                      ctrlr->opts.admin_timeout_ms);
    1639           0 :                 return 0;
    1640             :         }
    1641             : 
    1642             :         /* only 1 page size for doorbell buffer */
    1643           1 :         ctrlr->shadow_doorbell = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
    1644             :                                               NULL, SPDK_ENV_LCORE_ID_ANY,
    1645             :                                               SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
    1646           1 :         if (ctrlr->shadow_doorbell == NULL) {
    1647           0 :                 rc = -ENOMEM;
    1648           0 :                 goto error;
    1649             :         }
    1650             : 
    1651           1 :         len = ctrlr->page_size;
    1652           1 :         prp1 = spdk_vtophys(ctrlr->shadow_doorbell, &len);
    1653           1 :         if (prp1 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
    1654           0 :                 rc = -EFAULT;
    1655           0 :                 goto error;
    1656             :         }
    1657             : 
    1658           1 :         ctrlr->eventidx = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
    1659             :                                        NULL, SPDK_ENV_LCORE_ID_ANY,
    1660             :                                        SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
    1661           1 :         if (ctrlr->eventidx == NULL) {
    1662           0 :                 rc = -ENOMEM;
    1663           0 :                 goto error;
    1664             :         }
    1665             : 
    1666           1 :         len = ctrlr->page_size;
    1667           1 :         prp2 = spdk_vtophys(ctrlr->eventidx, &len);
    1668           1 :         if (prp2 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
    1669           0 :                 rc = -EFAULT;
    1670           0 :                 goto error;
    1671             :         }
    1672             : 
    1673           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
    1674           1 :                              ctrlr->opts.admin_timeout_ms);
    1675             : 
    1676           1 :         rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
    1677             :                         nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
    1678           1 :         if (rc != 0) {
    1679           0 :                 goto error;
    1680             :         }
    1681             : 
    1682           1 :         return 0;
    1683             : 
    1684           0 : error:
    1685           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1686           0 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    1687           0 :         return rc;
    1688             : }
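
/*
 * Editorial note on the above: the Doorbell Buffer Config command takes
 * two physically contiguous host pages, PRP1 for the shadow doorbells
 * and PRP2 for the EventIdx array.  Hence each buffer is allocated
 * page-sized and page-aligned, translated with spdk_vtophys(), and a
 * split translation (len != page_size) is treated as -EFAULT.
 */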
    1689             : 
    1690             : void
    1691          48 : nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr)
    1692             : {
    1693             :         struct nvme_request     *req, *tmp;
    1694          48 :         struct spdk_nvme_cpl    cpl = {};
    1695             : 
    1696          48 :         cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
    1697          48 :         cpl.status.sct = SPDK_NVME_SCT_GENERIC;
    1698             : 
    1699          48 :         STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) {
    1700           0 :                 STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
    1701           0 :                 ctrlr->outstanding_aborts++;
    1702             : 
    1703           0 :                 nvme_complete_request(req->cb_fn, req->cb_arg, req->qpair, req, &cpl);
    1704             :         }
    1705          48 : }
    1706             : 
    1707             : static int
    1708           2 : nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
    1709             : {
    1710           2 :         if (ctrlr->is_resetting || ctrlr->is_removed) {
    1711             :                 /*
    1712             :                  * Controller is already resetting or has been removed. Return
    1713             :                  *  immediately since there is no need to kick off another
    1714             :                  *  reset in these cases.
    1715             :                  */
    1716           1 :                 return ctrlr->is_resetting ? -EBUSY : -ENXIO;
    1717             :         }
    1718             : 
    1719           1 :         ctrlr->is_resetting = true;
    1720           1 :         ctrlr->is_failed = false;
    1721           1 :         ctrlr->is_disconnecting = true;
    1722           1 :         ctrlr->prepare_for_reset = true;
    1723             : 
    1724           1 :         NVME_CTRLR_NOTICELOG(ctrlr, "resetting controller\n");
    1725             : 
    1726             :         /* Disable keep-alive, it'll be re-enabled as part of the init process */
    1727           1 :         ctrlr->keep_alive_interval_ticks = 0;
    1728             : 
    1729             :         /* Abort all of the queued abort requests */
    1730           1 :         nvme_ctrlr_abort_queued_aborts(ctrlr);
    1731             : 
    1732           1 :         nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
    1733             : 
    1734           1 :         ctrlr->adminq->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1735           1 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    1736             : 
    1737           1 :         return 0;
    1738             : }
    1739             : 
    1740             : static void
    1741           1 : nvme_ctrlr_disconnect_done(struct spdk_nvme_ctrlr *ctrlr)
    1742             : {
    1743           1 :         assert(ctrlr->is_failed == false);
    1744           1 :         ctrlr->is_disconnecting = false;
    1745             : 
    1746             :         /* Doorbell buffer config is invalid during reset */
    1747           1 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    1748             : 
    1749             :         /* I/O Command Set Specific Identify Controller data is invalidated during reset */
    1750           1 :         nvme_ctrlr_free_iocs_specific_data(ctrlr);
    1751             : 
    1752           1 :         spdk_bit_array_free(&ctrlr->free_io_qids);
    1753             : 
    1754             :         /* Set the state back to DISCONNECTED to cause a full hardware reset. */
    1755           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISCONNECTED, NVME_TIMEOUT_INFINITE);
    1756           1 : }
    1757             : 
    1758             : int
    1759           0 : spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
    1760             : {
    1761             :         int rc;
    1762             : 
    1763           0 :         nvme_ctrlr_lock(ctrlr);
    1764           0 :         rc = nvme_ctrlr_disconnect(ctrlr);
    1765           0 :         nvme_ctrlr_unlock(ctrlr);
    1766             : 
    1767           0 :         return rc;
    1768             : }
    1769             : 
    1770             : void
    1771           1 : spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
    1772             : {
    1773           1 :         nvme_ctrlr_lock(ctrlr);
    1774             : 
    1775           1 :         ctrlr->prepare_for_reset = false;
    1776             : 
    1777             :         /* Set the state back to INIT to cause a full hardware reset. */
    1778           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    1779             : 
     1780             :         /* Return without releasing ctrlr_lock. ctrlr_lock will be released when
     1781             :          * spdk_nvme_ctrlr_reconnect_poll_async() returns a value other than -EAGAIN.
    1782             :          */
    1783           1 : }
    1784             : 
    1785             : int
    1786           0 : nvme_ctrlr_reinitialize_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
    1787             : {
    1788             :         bool async;
    1789             :         int rc;
    1790             : 
    1791           0 :         if (nvme_ctrlr_get_current_process(ctrlr) != qpair->active_proc ||
    1792           0 :             spdk_nvme_ctrlr_is_fabrics(ctrlr) || nvme_qpair_is_admin_queue(qpair)) {
    1793           0 :                 assert(false);
    1794             :                 return -EINVAL;
    1795             :         }
    1796             : 
    1797             :         /* Force a synchronous connect. */
    1798           0 :         async = qpair->async;
    1799           0 :         qpair->async = false;
    1800           0 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
    1801           0 :         qpair->async = async;
    1802             : 
    1803           0 :         if (rc != 0) {
    1804           0 :                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1805             :         }
    1806             : 
    1807           0 :         return rc;
    1808             : }
    1809             : 
    1810             : /**
    1811             :  * This function will be called when the controller is being reinitialized.
    1812             :  * Note: the ctrlr_lock must be held when calling this function.
    1813             :  */
    1814             : int
    1815          25 : spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
    1816             : {
    1817             :         struct spdk_nvme_ns *ns, *tmp_ns;
    1818             :         struct spdk_nvme_qpair  *qpair;
    1819          25 :         int rc = 0, rc_tmp = 0;
    1820             : 
    1821          25 :         if (nvme_ctrlr_process_init(ctrlr) != 0) {
    1822           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "controller reinitialization failed\n");
    1823           0 :                 rc = -1;
    1824             :         }
    1825          25 :         if (ctrlr->state != NVME_CTRLR_STATE_READY && rc != -1) {
    1826          24 :                 return -EAGAIN;
    1827             :         }
    1828             : 
    1829             :         /*
    1830             :          * For non-fabrics controllers, the memory locations of the transport qpair
    1831             :          * don't change when the controller is reset. They simply need to be
    1832             :          * re-enabled with admin commands to the controller. For fabric
    1833             :          * controllers we need to disconnect and reconnect the qpair on its
    1834             :          * own thread outside of the context of the reset.
    1835             :          */
    1836           1 :         if (rc == 0 && !spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
    1837             :                 /* Reinitialize qpairs */
    1838           1 :                 TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
    1839             :                         /* Always clear the qid bit here, even for a foreign qpair. We need
    1840             :                          * to make sure another process doesn't get the chance to grab that
    1841             :                          * qid.
    1842             :                          */
    1843           0 :                         assert(spdk_bit_array_get(ctrlr->free_io_qids, qpair->id));
    1844           0 :                         spdk_bit_array_clear(ctrlr->free_io_qids, qpair->id);
    1845           0 :                         if (nvme_ctrlr_get_current_process(ctrlr) != qpair->active_proc) {
    1846             :                                 /*
    1847             :                                  * We cannot reinitialize a foreign qpair. The qpair's owning
    1848             :                                  * process will take care of it. Set failure reason to FAILURE_RESET
    1849             :                                  * to ensure that happens.
    1850             :                                  */
    1851           0 :                                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_RESET;
    1852           0 :                                 continue;
    1853             :                         }
    1854           0 :                         rc_tmp = nvme_ctrlr_reinitialize_io_qpair(ctrlr, qpair);
    1855           0 :                         if (rc_tmp != 0) {
    1856           0 :                                 rc = rc_tmp;
    1857             :                         }
    1858             :                 }
    1859             :         }
    1860             : 
    1861             :         /*
    1862             :          * Take this opportunity to remove inactive namespaces. During a reset namespace
    1863             :          * handles can be invalidated.
    1864             :          */
    1865           5 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    1866           4 :                 if (!ns->active) {
    1867           1 :                         RB_REMOVE(nvme_ns_tree, &ctrlr->ns, ns);
    1868           1 :                         spdk_free(ns);
    1869             :                 }
    1870             :         }
    1871             : 
    1872           1 :         if (rc) {
    1873           0 :                 nvme_ctrlr_fail(ctrlr, false);
    1874             :         }
    1875           1 :         ctrlr->is_resetting = false;
    1876             : 
    1877           1 :         nvme_ctrlr_unlock(ctrlr);
    1878             : 
    1879           1 :         if (!ctrlr->cdata.oaes.ns_attribute_notices) {
    1880             :                 /*
     1881             :                  * If the controller doesn't support ns_attribute_notices and
     1882             :                  * namespace attributes changed (e.g. the number of namespaces),
     1883             :                  * we have to update the I/O message handling here ourselves.
    1884             :                  */
    1885           1 :                 nvme_io_msg_ctrlr_update(ctrlr);
    1886             :         }
    1887             : 
    1888           1 :         return rc;
    1889             : }
    1890             : 
    1891             : /*
    1892             :  * For PCIe transport, spdk_nvme_ctrlr_disconnect() will do a Controller Level Reset
     1893             :  * (Change CC.EN from 1 to 0) as an operation to disconnect the admin qpair.
    1894             :  * The following two functions are added to do a Controller Level Reset. They have
    1895             :  * to be called under the nvme controller's lock.
    1896             :  */
    1897             : void
    1898           1 : nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr)
    1899             : {
    1900           1 :         assert(ctrlr->is_disconnecting == true);
    1901             : 
    1902           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN, NVME_TIMEOUT_INFINITE);
    1903           1 : }
    1904             : 
    1905             : int
    1906           2 : nvme_ctrlr_disable_poll(struct spdk_nvme_ctrlr *ctrlr)
    1907             : {
    1908           2 :         int rc = 0;
    1909             : 
    1910           2 :         if (nvme_ctrlr_process_init(ctrlr) != 0) {
    1911           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "failed to disable controller\n");
    1912           0 :                 rc = -1;
    1913             :         }
    1914             : 
    1915           2 :         if (ctrlr->state != NVME_CTRLR_STATE_DISABLED && rc != -1) {
    1916           1 :                 return -EAGAIN;
    1917             :         }
    1918             : 
    1919           1 :         return rc;
    1920             : }
    1921             : 
    1922             : static void
    1923           1 : nvme_ctrlr_fail_io_qpairs(struct spdk_nvme_ctrlr *ctrlr)
    1924             : {
    1925             :         struct spdk_nvme_qpair  *qpair;
    1926             : 
    1927           1 :         TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
    1928           0 :                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1929             :         }
    1930           1 : }
    1931             : 
    1932             : int
    1933           2 : spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
    1934             : {
    1935             :         int rc;
    1936             : 
    1937           2 :         nvme_ctrlr_lock(ctrlr);
    1938             : 
    1939           2 :         rc = nvme_ctrlr_disconnect(ctrlr);
    1940           2 :         if (rc == 0) {
    1941           1 :                 nvme_ctrlr_fail_io_qpairs(ctrlr);
    1942             :         }
    1943             : 
    1944           2 :         nvme_ctrlr_unlock(ctrlr);
    1945             : 
    1946           2 :         if (rc != 0) {
    1947           1 :                 if (rc == -EBUSY) {
    1948           1 :                         rc = 0;
    1949             :                 }
    1950           1 :                 return rc;
    1951             :         }
    1952             : 
    1953             :         while (1) {
    1954           1 :                 rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr);
    1955           1 :                 if (rc == -ENXIO) {
    1956           1 :                         break;
    1957             :                 }
    1958             :         }
    1959             : 
    1960           1 :         spdk_nvme_ctrlr_reconnect_async(ctrlr);
    1961             : 
    1962             :         while (true) {
    1963          25 :                 rc = spdk_nvme_ctrlr_reconnect_poll_async(ctrlr);
    1964          25 :                 if (rc != -EAGAIN) {
    1965           1 :                         break;
    1966             :                 }
    1967             :         }
    1968             : 
    1969           1 :         return rc;
    1970             : }
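                      : 
                      : /*
                      :  * [Editorial sketch, assuming an application-side poller] The blocking loop
                      :  * above can be split for event-driven use: disconnect once, drain the
                      :  * aborted admin completions, start the reconnect, then poll. The function
                      :  * name is hypothetical; all spdk_nvme_ctrlr_* calls are public API.
                      :  */
                      : static int
                      : example_nonblocking_reset_start(struct spdk_nvme_ctrlr *ctrlr)
                      : {
                      :         int rc = spdk_nvme_ctrlr_disconnect(ctrlr);
                      : 
                      :         if (rc != 0) {
                      :                 return rc == -EBUSY ? 0 : rc; /* -EBUSY: reset already in progress */
                      :         }
                      : 
                      :         /* Admin requests aborted by the disconnect complete with -ENXIO. */
                      :         while (spdk_nvme_ctrlr_process_admin_completions(ctrlr) != -ENXIO) {
                      :         }
                      : 
                      :         spdk_nvme_ctrlr_reconnect_async(ctrlr);
                      : 
                      :         /*
                      :          * From here, call spdk_nvme_ctrlr_reconnect_poll_async() periodically
                      :          * until it returns something other than -EAGAIN.
                      :          */
                      :         return 0;
                      : }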
    1971             : 
    1972             : int
    1973           0 : spdk_nvme_ctrlr_reset_subsystem(struct spdk_nvme_ctrlr *ctrlr)
    1974             : {
    1975             :         union spdk_nvme_cap_register cap;
    1976           0 :         int rc = 0;
    1977             : 
    1978           0 :         cap = spdk_nvme_ctrlr_get_regs_cap(ctrlr);
    1979           0 :         if (cap.bits.nssrs == 0) {
    1980           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "subsystem reset is not supported\n");
    1981           0 :                 return -ENOTSUP;
    1982             :         }
    1983             : 
    1984           0 :         NVME_CTRLR_NOTICELOG(ctrlr, "resetting subsystem\n");
    1985           0 :         nvme_ctrlr_lock(ctrlr);
    1986           0 :         ctrlr->is_resetting = true;
    1987           0 :         rc = nvme_ctrlr_set_nssr(ctrlr, SPDK_NVME_NSSR_VALUE);
    1988           0 :         ctrlr->is_resetting = false;
    1989             : 
    1990           0 :         nvme_ctrlr_unlock(ctrlr);
    1991             :         /*
    1992             :          * No more cleanup at this point like in the ctrlr reset. A subsystem reset will cause
    1993             :          * a hot remove for PCIe transport. The hot remove handling does all the necessary ctrlr cleanup.
    1994             :          */
    1995           0 :         return rc;
    1996             : }
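                      : 
                      : /*
                      :  * [Editorial usage sketch] CAP.NSSRS can also be checked up front. On PCIe
                      :  * the subsystem reset surfaces as a hot remove, so the cleanup happens in
                      :  * the remove callback rather than at the call site. Name is hypothetical.
                      :  */
                      : static void
                      : example_try_subsystem_reset(struct spdk_nvme_ctrlr *ctrlr)
                      : {
                      :         union spdk_nvme_cap_register cap = spdk_nvme_ctrlr_get_regs_cap(ctrlr);
                      : 
                      :         if (cap.bits.nssrs) {
                      :                 (void)spdk_nvme_ctrlr_reset_subsystem(ctrlr);
                      :         }
                      : }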
    1997             : 
    1998             : int
    1999           4 : spdk_nvme_ctrlr_set_trid(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_transport_id *trid)
    2000             : {
    2001           4 :         int rc = 0;
    2002             : 
    2003           4 :         nvme_ctrlr_lock(ctrlr);
    2004             : 
    2005           4 :         if (ctrlr->is_failed == false) {
    2006           1 :                 rc = -EPERM;
    2007           1 :                 goto out;
    2008             :         }
    2009             : 
    2010           3 :         if (trid->trtype != ctrlr->trid.trtype) {
    2011           1 :                 rc = -EINVAL;
    2012           1 :                 goto out;
    2013             :         }
    2014             : 
    2015           2 :         if (strncmp(trid->subnqn, ctrlr->trid.subnqn, SPDK_NVMF_NQN_MAX_LEN)) {
    2016           1 :                 rc = -EINVAL;
    2017           1 :                 goto out;
    2018             :         }
    2019             : 
    2020           1 :         ctrlr->trid = *trid;
    2021             : 
    2022           4 : out:
    2023           4 :         nvme_ctrlr_unlock(ctrlr);
    2024           4 :         return rc;
    2025             : }
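                      : 
                      : /*
                      :  * [Editorial usage sketch] spdk_nvme_ctrlr_set_trid() only succeeds while
                      :  * the controller is failed, and only for an alternate path to the same
                      :  * subsystem (same trtype and subnqn). A hypothetical failover flow:
                      :  */
                      : static int
                      : example_failover(struct spdk_nvme_ctrlr *ctrlr,
                      :                  struct spdk_nvme_transport_id *alt_trid)
                      : {
                      :         int rc = spdk_nvme_ctrlr_set_trid(ctrlr, alt_trid);
                      : 
                      :         if (rc != 0) {
                      :                 return rc; /* -EPERM: not failed; -EINVAL: different subsystem */
                      :         }
                      : 
                      :         return spdk_nvme_ctrlr_reset(ctrlr); /* reconnect over the new path */
                      : }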
    2026             : 
    2027             : void
    2028           0 : spdk_nvme_ctrlr_set_remove_cb(struct spdk_nvme_ctrlr *ctrlr,
    2029             :                               spdk_nvme_remove_cb remove_cb, void *remove_ctx)
    2030             : {
    2031           0 :         if (!spdk_process_is_primary()) {
    2032           0 :                 return;
    2033             :         }
    2034             : 
    2035           0 :         nvme_ctrlr_lock(ctrlr);
    2036           0 :         ctrlr->remove_cb = remove_cb;
    2037           0 :         ctrlr->cb_ctx = remove_ctx;
    2038           0 :         nvme_ctrlr_unlock(ctrlr);
    2039             : }
    2040             : 
    2041             : int
    2042           0 : spdk_nvme_ctrlr_set_keys(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ctrlr_key_opts *opts)
    2043             : {
    2044           0 :         nvme_ctrlr_lock(ctrlr);
    2045           0 :         if (SPDK_GET_FIELD(opts, dhchap_key, ctrlr->opts.dhchap_key) == NULL &&
    2046           0 :             SPDK_GET_FIELD(opts, dhchap_ctrlr_key, ctrlr->opts.dhchap_ctrlr_key) != NULL) {
    2047           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "DH-HMAC-CHAP controller key requires host key to be set\n");
    2048           0 :                 nvme_ctrlr_unlock(ctrlr);
    2049           0 :                 return -EINVAL;
    2050             :         }
    2051             : 
    2052           0 :         ctrlr->opts.dhchap_key =
    2053           0 :                 SPDK_GET_FIELD(opts, dhchap_key, ctrlr->opts.dhchap_key);
    2054           0 :         ctrlr->opts.dhchap_ctrlr_key =
    2055           0 :                 SPDK_GET_FIELD(opts, dhchap_ctrlr_key, ctrlr->opts.dhchap_ctrlr_key);
    2056           0 :         nvme_ctrlr_unlock(ctrlr);
    2057             : 
    2058           0 :         return 0;
    2059             : }
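                      : 
                      : /*
                      :  * [Editorial usage sketch, assumptions flagged] This assumes the versioned
                      :  * opts struct carries a size field (as the SPDK_GET_FIELD usage above
                      :  * implies) and that host_key/ctrlr_key were obtained from the keyring,
                      :  * e.g. via spdk_keyring_get_key(). Function name is hypothetical.
                      :  */
                      : static int
                      : example_set_dhchap_keys(struct spdk_nvme_ctrlr *ctrlr,
                      :                         struct spdk_key *host_key, struct spdk_key *ctrlr_key)
                      : {
                      :         struct spdk_nvme_ctrlr_key_opts opts = {};
                      : 
                      :         opts.size = sizeof(opts);
                      :         opts.dhchap_key = host_key;        /* must be set if ctrlr_key is */
                      :         opts.dhchap_ctrlr_key = ctrlr_key;
                      : 
                      :         return spdk_nvme_ctrlr_set_keys(ctrlr, &opts);
                      : }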
    2060             : 
    2061             : static void
    2062          16 : nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2063             : {
    2064          16 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2065             : 
    2066          16 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2067           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_identify_controller failed!\n");
    2068           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2069           0 :                 return;
    2070             :         }
    2071             : 
    2072             :         /*
    2073             :          * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
    2074             :          *  controller supports.
    2075             :          */
    2076          16 :         ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
    2077          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
    2078          16 :         if (ctrlr->cdata.mdts > 0) {
    2079           0 :                 ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
    2080             :                                                 ctrlr->min_page_size * (1 << ctrlr->cdata.mdts));
    2081           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
    2082             :         }
    2083             : 
    2084          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
    2085          16 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    2086           1 :                 ctrlr->cntlid = ctrlr->cdata.cntlid;
    2087             :         } else {
    2088             :                 /*
    2089             :                  * Fabrics controllers should already have CNTLID from the Connect command.
    2090             :                  *
    2091             :                  * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
    2092             :                  * trust the one from Connect.
    2093             :                  */
    2094          15 :                 if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
    2095           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
    2096             :                                             ctrlr->cdata.cntlid, ctrlr->cntlid);
    2097             :                 }
    2098             :         }
    2099             : 
    2100          16 :         if (ctrlr->cdata.sgls.supported && !(ctrlr->quirks & NVME_QUIRK_NOT_USE_SGL)) {
    2101           0 :                 assert(ctrlr->cdata.sgls.supported != 0x3);
    2102           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
    2103           0 :                 if (ctrlr->cdata.sgls.supported == 0x2) {
    2104           0 :                         ctrlr->flags |= SPDK_NVME_CTRLR_SGL_REQUIRES_DWORD_ALIGNMENT;
    2105             :                 }
    2106             : 
    2107           0 :                 ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
    2108           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "transport max_sges %u\n", ctrlr->max_sges);
    2109             :         }
    2110             : 
    2111          16 :         if (ctrlr->cdata.sgls.metadata_address && !(ctrlr->quirks & NVME_QUIRK_NOT_USE_SGL)) {
    2112           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_MPTR_SGL_SUPPORTED;
    2113             :         }
    2114             : 
    2115          16 :         if (ctrlr->cdata.oacs.security && !(ctrlr->quirks & NVME_QUIRK_OACS_SECURITY)) {
    2116           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_SECURITY_SEND_RECV_SUPPORTED;
    2117             :         }
    2118             : 
    2119          16 :         if (ctrlr->cdata.oacs.directives) {
    2120           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_DIRECTIVES_SUPPORTED;
    2121             :         }
    2122             : 
    2123          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "fuses compare and write: %d\n",
    2124             :                             ctrlr->cdata.fuses.compare_and_write);
    2125          16 :         if (ctrlr->cdata.fuses.compare_and_write) {
    2126           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_COMPARE_AND_WRITE_SUPPORTED;
    2127             :         }
    2128             : 
    2129          16 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
    2130          16 :                              ctrlr->opts.admin_timeout_ms);
    2131             : }
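                      : 
                      : /*
                      :  * [Editorial worked example] The MDTS clamp above, in isolation: with
                      :  * min_page_size = 4096 and cdata.mdts = 5, the controller limit is
                      :  * 4096 * (1 << 5) = 128 KiB, so e.g. a 2 MiB transport limit is clamped
                      :  * down to 128 KiB. mdts == 0 means no limit is reported. Name is
                      :  * illustrative.
                      :  */
                      : static uint32_t
                      : example_mdts_clamp(uint32_t transport_max, uint32_t min_page_size, uint8_t mdts)
                      : {
                      :         return mdts ? spdk_min(transport_max, min_page_size * (1u << mdts))
                      :                     : transport_max;
                      : }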
    2132             : 
    2133             : static int
    2134          16 : nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
    2135             : {
    2136             :         int     rc;
    2137             : 
    2138          16 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
    2139          16 :                              ctrlr->opts.admin_timeout_ms);
    2140             : 
    2141          16 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0, 0,
    2142          16 :                                      &ctrlr->cdata, sizeof(ctrlr->cdata),
    2143             :                                      nvme_ctrlr_identify_done, ctrlr);
    2144          16 :         if (rc != 0) {
    2145           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2146           0 :                 return rc;
    2147             :         }
    2148             : 
    2149          16 :         return 0;
    2150             : }
    2151             : 
    2152             : static void
    2153           0 : nvme_ctrlr_get_zns_cmd_and_effects_log_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2154             : {
    2155             :         struct spdk_nvme_cmds_and_effect_log_page *log_page;
    2156           0 :         struct spdk_nvme_ctrlr *ctrlr = arg;
    2157             : 
    2158           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2159           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_get_zns_cmd_and_effects_log failed!\n");
    2160           0 :                 spdk_free(ctrlr->tmp_ptr);
    2161           0 :                 ctrlr->tmp_ptr = NULL;
    2162           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2163           0 :                 return;
    2164             :         }
    2165             : 
    2166           0 :         log_page = ctrlr->tmp_ptr;
    2167             : 
    2168           0 :         if (log_page->io_cmds_supported[SPDK_NVME_OPC_ZONE_APPEND].csupp) {
    2169           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
    2170             :         }
    2171           0 :         spdk_free(ctrlr->tmp_ptr);
    2172           0 :         ctrlr->tmp_ptr = NULL;
    2173             : 
    2174           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES, ctrlr->opts.admin_timeout_ms);
    2175             : }
    2176             : 
    2177             : static int
    2178           0 : nvme_ctrlr_get_zns_cmd_and_effects_log(struct spdk_nvme_ctrlr *ctrlr)
    2179             : {
    2180             :         int rc;
    2181             : 
    2182           0 :         assert(!ctrlr->tmp_ptr);
    2183           0 :         ctrlr->tmp_ptr = spdk_zmalloc(sizeof(struct spdk_nvme_cmds_and_effect_log_page), 64, NULL,
    2184             :                                       SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
    2185           0 :         if (!ctrlr->tmp_ptr) {
    2186           0 :                 rc = -ENOMEM;
    2187           0 :                 goto error;
    2188             :         }
    2189             : 
    2190           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,
    2191           0 :                              ctrlr->opts.admin_timeout_ms);
    2192             : 
    2193           0 :         rc = spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, SPDK_NVME_LOG_COMMAND_EFFECTS_LOG,
    2194             :                         0, ctrlr->tmp_ptr, sizeof(struct spdk_nvme_cmds_and_effect_log_page),
    2195             :                         0, 0, 0, SPDK_NVME_CSI_ZNS << 24,
    2196             :                         nvme_ctrlr_get_zns_cmd_and_effects_log_done, ctrlr);
    2197           0 :         if (rc != 0) {
    2198           0 :                 goto error;
    2199             :         }
    2200             : 
    2201           0 :         return 0;
    2202             : 
    2203           0 : error:
    2204           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2205           0 :         spdk_free(ctrlr->tmp_ptr);
    2206           0 :         ctrlr->tmp_ptr = NULL;
    2207           0 :         return rc;
    2208             : }
    2209             : 
    2210             : static void
    2211           0 : nvme_ctrlr_identify_zns_specific_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2212             : {
    2213           0 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2214             : 
    2215           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2216             :                 /* no need to print an error, the controller simply does not support ZNS */
    2217           0 :                 nvme_ctrlr_free_zns_specific_data(ctrlr);
    2218           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
    2219           0 :                                      ctrlr->opts.admin_timeout_ms);
    2220           0 :                 return;
    2221             :         }
    2222             : 
    2223             :         /* A zero zasl value means use mdts */
    2224           0 :         if (ctrlr->cdata_zns->zasl) {
    2225           0 :                 uint32_t max_append = ctrlr->min_page_size * (1 << ctrlr->cdata_zns->zasl);
    2226           0 :                 ctrlr->max_zone_append_size = spdk_min(ctrlr->max_xfer_size, max_append);
    2227             :         } else {
    2228           0 :                 ctrlr->max_zone_append_size = ctrlr->max_xfer_size;
    2229             :         }
    2230             : 
    2231           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,
    2232           0 :                              ctrlr->opts.admin_timeout_ms);
    2233             : }
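                      : 
                      : /*
                      :  * [Editorial worked example] The ZASL math above, in isolation: with
                      :  * min_page_size = 4096 and zasl = 3, zone append is capped at
                      :  * 4096 * (1 << 3) = 32 KiB (and never exceeds max_xfer_size); zasl == 0
                      :  * falls back to the MDTS-derived max_xfer_size. Name is illustrative.
                      :  */
                      : static uint32_t
                      : example_zasl_limit(uint32_t max_xfer_size, uint32_t min_page_size, uint8_t zasl)
                      : {
                      :         return zasl ? spdk_min(max_xfer_size, min_page_size * (1u << zasl))
                      :                     : max_xfer_size;
                      : }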
    2234             : 
    2235             : /**
    2236             :  * This function will try to fetch the I/O Command Specific Controller data structure for
    2237             :  * each I/O Command Set supported by SPDK.
    2238             :  *
    2239             :  * If an I/O Command Set is not supported by the controller, "Invalid Field in Command"
     2240             :  * will be returned. Since we are probing exploratively, getting an error back
    2241             :  * from the controller should not be treated as fatal.
    2242             :  *
    2243             :  * I/O Command Sets not supported by SPDK will be skipped (e.g. Key Value Command Set).
    2244             :  *
     2245             :  * I/O Command Sets without an IOCS-specific data structure (i.e. a zero-filled IOCS-specific
    2246             :  * data structure) will be skipped (e.g. NVM Command Set, Key Value Command Set).
    2247             :  */
    2248             : static int
    2249          19 : nvme_ctrlr_identify_iocs_specific(struct spdk_nvme_ctrlr *ctrlr)
    2250             : {
    2251             :         int     rc;
    2252             : 
    2253          19 :         if (!nvme_ctrlr_multi_iocs_enabled(ctrlr)) {
    2254          19 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
    2255          19 :                                      ctrlr->opts.admin_timeout_ms);
    2256          19 :                 return 0;
    2257             :         }
    2258             : 
    2259             :         /*
    2260             :          * Since SPDK currently only needs to fetch a single Command Set, keep the code here,
    2261             :          * instead of creating multiple NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC substates,
    2262             :          * which would require additional functions and complexity for no good reason.
    2263             :          */
    2264           0 :         assert(!ctrlr->cdata_zns);
    2265           0 :         ctrlr->cdata_zns = spdk_zmalloc(sizeof(*ctrlr->cdata_zns), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
    2266             :                                         SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
    2267           0 :         if (!ctrlr->cdata_zns) {
    2268           0 :                 rc = -ENOMEM;
    2269           0 :                 goto error;
    2270             :         }
    2271             : 
    2272           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,
    2273           0 :                              ctrlr->opts.admin_timeout_ms);
    2274             : 
    2275           0 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR_IOCS, 0, 0, SPDK_NVME_CSI_ZNS,
    2276           0 :                                      ctrlr->cdata_zns, sizeof(*ctrlr->cdata_zns),
    2277             :                                      nvme_ctrlr_identify_zns_specific_done, ctrlr);
    2278           0 :         if (rc != 0) {
    2279           0 :                 goto error;
    2280             :         }
    2281             : 
    2282           0 :         return 0;
    2283             : 
    2284           0 : error:
    2285           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2286           0 :         nvme_ctrlr_free_zns_specific_data(ctrlr);
    2287           0 :         return rc;
    2288             : }
    2289             : 
    2290             : enum nvme_active_ns_state {
    2291             :         NVME_ACTIVE_NS_STATE_IDLE,
    2292             :         NVME_ACTIVE_NS_STATE_PROCESSING,
    2293             :         NVME_ACTIVE_NS_STATE_DONE,
    2294             :         NVME_ACTIVE_NS_STATE_ERROR
    2295             : };
    2296             : 
    2297             : typedef void (*nvme_active_ns_ctx_deleter)(struct nvme_active_ns_ctx *);
    2298             : 
    2299             : struct nvme_active_ns_ctx {
    2300             :         struct spdk_nvme_ctrlr *ctrlr;
    2301             :         uint32_t page_count;
    2302             :         uint32_t next_nsid;
    2303             :         uint32_t *new_ns_list;
    2304             :         nvme_active_ns_ctx_deleter deleter;
    2305             : 
    2306             :         enum nvme_active_ns_state state;
    2307             : };
    2308             : 
    2309             : static struct nvme_active_ns_ctx *
    2310          45 : nvme_active_ns_ctx_create(struct spdk_nvme_ctrlr *ctrlr, nvme_active_ns_ctx_deleter deleter)
    2311             : {
    2312             :         struct nvme_active_ns_ctx *ctx;
    2313          45 :         uint32_t *new_ns_list = NULL;
    2314             : 
    2315          45 :         ctx = calloc(1, sizeof(*ctx));
    2316          45 :         if (!ctx) {
    2317           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate nvme_active_ns_ctx!\n");
    2318           0 :                 return NULL;
    2319             :         }
    2320             : 
    2321          45 :         new_ns_list = spdk_zmalloc(sizeof(struct spdk_nvme_ns_list), ctrlr->page_size,
    2322             :                                    NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
    2323          45 :         if (!new_ns_list) {
    2324           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate active_ns_list!\n");
    2325           0 :                 free(ctx);
    2326           0 :                 return NULL;
    2327             :         }
    2328             : 
    2329          45 :         ctx->page_count = 1;
    2330          45 :         ctx->new_ns_list = new_ns_list;
    2331          45 :         ctx->ctrlr = ctrlr;
    2332          45 :         ctx->deleter = deleter;
    2333             : 
    2334          45 :         return ctx;
    2335             : }
    2336             : 
    2337             : static void
    2338          45 : nvme_active_ns_ctx_destroy(struct nvme_active_ns_ctx *ctx)
    2339             : {
    2340          45 :         spdk_free(ctx->new_ns_list);
    2341          45 :         free(ctx);
    2342          45 : }
    2343             : 
    2344             : static int
    2345       18403 : nvme_ctrlr_destruct_namespace(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    2346             : {
    2347       18403 :         struct spdk_nvme_ns tmp, *ns;
    2348             : 
    2349       18403 :         assert(ctrlr != NULL);
    2350             : 
    2351       18403 :         tmp.id = nsid;
    2352       18403 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    2353       18403 :         if (ns == NULL) {
    2354           0 :                 return -EINVAL;
    2355             :         }
    2356             : 
    2357       18403 :         nvme_ns_destruct(ns);
    2358       18403 :         ns->active = false;
    2359             : 
    2360       18403 :         return 0;
    2361             : }
    2362             : 
    2363             : static int
    2364       12311 : nvme_ctrlr_construct_namespace(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    2365             : {
    2366             :         struct spdk_nvme_ns *ns;
    2367             : 
    2368       12311 :         if (nsid < 1 || nsid > ctrlr->cdata.nn) {
    2369           0 :                 return -EINVAL;
    2370             :         }
    2371             : 
    2372             :         /* Namespaces are constructed on demand, so simply request it. */
    2373       12311 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2374       12311 :         if (ns == NULL) {
    2375           0 :                 return -ENOMEM;
    2376             :         }
    2377             : 
    2378       12311 :         ns->active = true;
    2379             : 
    2380       12311 :         return 0;
    2381             : }
    2382             : 
    2383             : static void
    2384          44 : nvme_ctrlr_identify_active_ns_swap(struct spdk_nvme_ctrlr *ctrlr, uint32_t *new_ns_list,
    2385             :                                    size_t max_entries)
    2386             : {
    2387          44 :         uint32_t active_ns_count = 0;
    2388             :         size_t i;
    2389             :         uint32_t nsid;
    2390             :         struct spdk_nvme_ns *ns, *tmp_ns;
    2391             :         int rc;
    2392             : 
    2393             :         /* First, remove namespaces that no longer exist */
    2394       15387 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    2395       15343 :                 nsid = new_ns_list[0];
    2396       15343 :                 active_ns_count = 0;
    2397     3547429 :                 while (nsid != 0) {
    2398     3536712 :                         if (nsid == ns->id) {
    2399        4626 :                                 break;
    2400             :                         }
    2401             : 
    2402     3532086 :                         nsid = new_ns_list[active_ns_count++];
    2403             :                 }
    2404             : 
    2405       15343 :                 if (nsid != ns->id) {
    2406             :                         /* Did not find this namespace id in the new list. */
    2407       10717 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Namespace %u was removed\n", ns->id);
    2408       10717 :                         nvme_ctrlr_destruct_namespace(ctrlr, ns->id);
    2409             :                 }
    2410             :         }
    2411             : 
    2412             :         /* Next, add new namespaces */
    2413          44 :         active_ns_count = 0;
    2414       12355 :         for (i = 0; i < max_entries; i++) {
    2415       12355 :                 nsid = new_ns_list[active_ns_count];
    2416             : 
    2417       12355 :                 if (nsid == 0) {
    2418          44 :                         break;
    2419             :                 }
    2420             : 
    2421             :                 /* If the namespace already exists, this will not construct it a second time. */
    2422       12311 :                 rc = nvme_ctrlr_construct_namespace(ctrlr, nsid);
    2423       12311 :                 if (rc != 0) {
     2424             :                         /* We can't easily handle a failure here, so just move on. */
    2425           0 :                         assert(false);
    2426             :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to allocate a namespace object.\n");
    2427             :                         continue;
    2428             :                 }
    2429             : 
    2430       12311 :                 active_ns_count++;
    2431             :         }
    2432             : 
    2433          44 :         ctrlr->active_ns_count = active_ns_count;
    2434          44 : }
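                      : 
                      : /*
                      :  * [Editorial worked example] With an existing tree of namespaces {1, 2, 3}
                      :  * and a new list of {2, 3, 4, 0}, the first pass above destructs namespace 1
                      :  * (absent from the new list), the second pass constructs namespace 4, and
                      :  * namespaces 2 and 3 are left untouched; active_ns_count ends up as 3.
                      :  */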
    2435             : 
    2436             : static void
    2437          30 : nvme_ctrlr_identify_active_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2438             : {
    2439          30 :         struct nvme_active_ns_ctx *ctx = arg;
    2440          30 :         uint32_t *new_ns_list = NULL;
    2441             : 
    2442          30 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2443           1 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2444           1 :                 goto out;
    2445             :         }
    2446             : 
    2447          29 :         ctx->next_nsid = ctx->new_ns_list[1024 * ctx->page_count - 1];
    2448          29 :         if (ctx->next_nsid == 0) {
    2449          24 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2450          24 :                 goto out;
    2451             :         }
    2452             : 
    2453           5 :         ctx->page_count++;
    2454           5 :         new_ns_list = spdk_realloc(ctx->new_ns_list,
    2455           5 :                                    ctx->page_count * sizeof(struct spdk_nvme_ns_list),
    2456           5 :                                    ctx->ctrlr->page_size);
    2457           5 :         if (!new_ns_list) {
    2458           0 :                 SPDK_ERRLOG("Failed to reallocate active_ns_list!\n");
    2459           0 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2460           0 :                 goto out;
    2461             :         }
    2462             : 
    2463           5 :         ctx->new_ns_list = new_ns_list;
    2464           5 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2465           5 :         return;
    2466             : 
    2467          25 : out:
    2468          25 :         if (ctx->deleter) {
    2469           9 :                 ctx->deleter(ctx);
    2470             :         }
    2471             : }
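                      : 
                      : /*
                      :  * [Editorial example] Each Identify Active NS List page holds 1024 ascending
                      :  * NSIDs, and the last entry of the page doubles as the pagination cursor.
                      :  * E.g. with 1500 active namespaces, page 1 ends with NSID 1024, so
                      :  * next_nsid = 1024 and the next Identify is issued with that NSID, returning
                      :  * NSIDs 1025..1500 followed by a zero terminator.
                      :  */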
    2472             : 
    2473             : static void
    2474          50 : nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx)
    2475             : {
    2476          50 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    2477             :         uint32_t i;
    2478             :         int rc;
    2479             : 
    2480          50 :         if (ctrlr->cdata.nn == 0) {
    2481          16 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2482          16 :                 goto out;
    2483             :         }
    2484             : 
    2485          34 :         assert(ctx->new_ns_list != NULL);
    2486             : 
    2487             :         /*
     2488             :          * If the controller doesn't support the active ns list (CNS 0x02),
     2489             :          * dummy up an active ns list, i.e. report all namespaces as active.
    2490             :          */
    2491          34 :         if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 1, 0) || ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS) {
    2492             :                 uint32_t *new_ns_list;
    2493             : 
    2494             :                 /*
     2495             :                  * The active NS list must always end with a zero element,
     2496             :                  * so we allocate room for cdata.nn + 1 entries.
    2497             :                  */
    2498           4 :                 ctx->page_count = spdk_divide_round_up(ctrlr->cdata.nn + 1,
    2499             :                                                        sizeof(struct spdk_nvme_ns_list) / sizeof(new_ns_list[0]));
    2500           4 :                 new_ns_list = spdk_realloc(ctx->new_ns_list,
    2501           4 :                                            ctx->page_count * sizeof(struct spdk_nvme_ns_list),
    2502           4 :                                            ctx->ctrlr->page_size);
    2503           4 :                 if (!new_ns_list) {
    2504           0 :                         SPDK_ERRLOG("Failed to reallocate active_ns_list!\n");
    2505           0 :                         ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2506           0 :                         goto out;
    2507             :                 }
    2508             : 
    2509           4 :                 ctx->new_ns_list = new_ns_list;
    2510           4 :                 ctx->new_ns_list[ctrlr->cdata.nn] = 0;
    2511        4091 :                 for (i = 0; i < ctrlr->cdata.nn; i++) {
    2512        4087 :                         ctx->new_ns_list[i] = i + 1;
    2513             :                 }
    2514             : 
    2515           4 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2516           4 :                 goto out;
    2517             :         }
    2518             : 
    2519          30 :         ctx->state = NVME_ACTIVE_NS_STATE_PROCESSING;
    2520          30 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, ctx->next_nsid, 0,
    2521          30 :                                      &ctx->new_ns_list[1024 * (ctx->page_count - 1)], sizeof(struct spdk_nvme_ns_list),
    2522             :                                      nvme_ctrlr_identify_active_ns_async_done, ctx);
    2523          30 :         if (rc != 0) {
    2524           0 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2525           0 :                 goto out;
    2526             :         }
    2527             : 
    2528          30 :         return;
    2529             : 
    2530          20 : out:
    2531          20 :         if (ctx->deleter) {
    2532          15 :                 ctx->deleter(ctx);
    2533             :         }
    2534             : }
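                      : 
                      : /*
                      :  * [Editorial worked example] For the fallback path above: with
                      :  * cdata.nn = 4087, the dummy list needs 4088 entries including the zero
                      :  * terminator, so spdk_divide_round_up(4088, 1024) = 4 pages are allocated
                      :  * before filling in NSIDs 1..4087.
                      :  */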
    2535             : 
    2536             : static void
    2537          24 : _nvme_active_ns_ctx_deleter(struct nvme_active_ns_ctx *ctx)
    2538             : {
    2539          24 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    2540             :         struct spdk_nvme_ns *ns;
    2541             : 
    2542          24 :         if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
    2543           0 :                 nvme_active_ns_ctx_destroy(ctx);
    2544           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2545           0 :                 return;
    2546             :         }
    2547             : 
    2548          24 :         assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
    2549             : 
    2550          28 :         RB_FOREACH(ns, nvme_ns_tree, &ctrlr->ns) {
    2551           4 :                 nvme_ns_free_iocs_specific_data(ns);
    2552             :         }
    2553             : 
    2554          24 :         nvme_ctrlr_identify_active_ns_swap(ctrlr, ctx->new_ns_list, ctx->page_count * 1024);
    2555          24 :         nvme_active_ns_ctx_destroy(ctx);
    2556          24 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS, ctrlr->opts.admin_timeout_ms);
    2557             : }
    2558             : 
    2559             : static void
    2560          24 : _nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    2561             : {
    2562             :         struct nvme_active_ns_ctx *ctx;
    2563             : 
    2564          24 :         ctx = nvme_active_ns_ctx_create(ctrlr, _nvme_active_ns_ctx_deleter);
    2565          24 :         if (!ctx) {
    2566           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2567           0 :                 return;
    2568             :         }
    2569             : 
    2570          24 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
    2571          24 :                              ctrlr->opts.admin_timeout_ms);
    2572          24 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2573             : }
    2574             : 
    2575             : int
    2576          21 : nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    2577             : {
    2578             :         struct nvme_active_ns_ctx *ctx;
    2579             :         int rc;
    2580             : 
    2581          21 :         ctx = nvme_active_ns_ctx_create(ctrlr, NULL);
    2582          21 :         if (!ctx) {
    2583           0 :                 return -ENOMEM;
    2584             :         }
    2585             : 
    2586          21 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2587          21 :         while (ctx->state == NVME_ACTIVE_NS_STATE_PROCESSING) {
    2588           0 :                 rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    2589           0 :                 if (rc < 0) {
    2590           0 :                         ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2591           0 :                         break;
    2592             :                 }
    2593             :         }
    2594             : 
    2595          21 :         if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
    2596           1 :                 nvme_active_ns_ctx_destroy(ctx);
    2597           1 :                 return -ENXIO;
    2598             :         }
    2599             : 
    2600          20 :         assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
    2601          20 :         nvme_ctrlr_identify_active_ns_swap(ctrlr, ctx->new_ns_list, ctx->page_count * 1024);
    2602          20 :         nvme_active_ns_ctx_destroy(ctx);
    2603             : 
    2604          20 :         return 0;
    2605             : }
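                      : 
                      : /*
                      :  * [Editorial usage sketch] Once the swap has run, the public iteration API
                      :  * walks exactly the namespaces marked active above. Name is hypothetical.
                      :  */
                      : static void
                      : example_for_each_active_ns(struct spdk_nvme_ctrlr *ctrlr)
                      : {
                      :         uint32_t nsid;
                      : 
                      :         for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
                      :              nsid != 0;
                      :              nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
                      :                 struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
                      : 
                      :                 (void)ns; /* inspect ns, e.g. spdk_nvme_ns_get_data(ns) */
                      :         }
                      : }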
    2606             : 
    2607             : static void
    2608          21 : nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2609             : {
    2610          21 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2611          21 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2612             :         uint32_t nsid;
    2613             :         int rc;
    2614             : 
    2615          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2616           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2617           0 :                 return;
    2618             :         }
    2619             : 
    2620          21 :         nvme_ns_set_identify_data(ns);
    2621             : 
    2622             :         /* move on to the next active NS */
    2623          21 :         nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2624          21 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2625          21 :         if (ns == NULL) {
    2626           6 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
    2627           6 :                                      ctrlr->opts.admin_timeout_ms);
    2628           6 :                 return;
    2629             :         }
    2630          15 :         ns->ctrlr = ctrlr;
    2631          15 :         ns->id = nsid;
    2632             : 
    2633          15 :         rc = nvme_ctrlr_identify_ns_async(ns);
    2634          15 :         if (rc) {
    2635           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2636             :         }
    2637             : }
    2638             : 
    2639             : static int
    2640          21 : nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
    2641             : {
    2642          21 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2643             :         struct spdk_nvme_ns_data *nsdata;
    2644             : 
    2645          21 :         nsdata = &ns->nsdata;
    2646             : 
    2647          21 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
    2648          21 :                              ctrlr->opts.admin_timeout_ms);
    2649          21 :         return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id, 0,
    2650             :                                        nsdata, sizeof(*nsdata),
    2651             :                                        nvme_ctrlr_identify_ns_async_done, ns);
    2652             : }
    2653             : 
    2654             : static int
    2655          14 : nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    2656             : {
    2657             :         uint32_t nsid;
    2658             :         struct spdk_nvme_ns *ns;
    2659             :         int rc;
    2660             : 
    2661          14 :         nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2662          14 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2663          14 :         if (ns == NULL) {
    2664             :                 /* No active NS, move on to the next state */
    2665           8 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
    2666           8 :                                      ctrlr->opts.admin_timeout_ms);
    2667           8 :                 return 0;
    2668             :         }
    2669             : 
    2670           6 :         ns->ctrlr = ctrlr;
    2671           6 :         ns->id = nsid;
    2672             : 
    2673           6 :         rc = nvme_ctrlr_identify_ns_async(ns);
    2674           6 :         if (rc) {
    2675           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2676             :         }
    2677             : 
    2678           6 :         return rc;
    2679             : }
    2680             : 
    2681             : static int
    2682           4 : nvme_ctrlr_identify_namespaces_iocs_specific_next(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
    2683             : {
    2684             :         uint32_t nsid;
    2685             :         struct spdk_nvme_ns *ns;
    2686             :         int rc;
    2687             : 
    2688           4 :         if (!prev_nsid) {
    2689           2 :                 nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2690             :         } else {
    2691             :                 /* move on to the next active NS */
    2692           2 :                 nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, prev_nsid);
    2693             :         }
    2694             : 
    2695           4 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2696           4 :         if (ns == NULL) {
    2697             :                 /* No first/next active NS, move on to the next state */
    2698           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2699           1 :                                      ctrlr->opts.admin_timeout_ms);
    2700           1 :                 return 0;
    2701             :         }
    2702             : 
     2703             :         /* loop until we find an ns that has (supported) IOCS-specific data */
    2704          10 :         while (!nvme_ns_has_supported_iocs_specific_data(ns)) {
    2705           8 :                 nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2706           8 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2707           8 :                 if (ns == NULL) {
    2708             :                         /* no namespace with (supported) iocs specific data found */
    2709           1 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2710           1 :                                              ctrlr->opts.admin_timeout_ms);
    2711           1 :                         return 0;
    2712             :                 }
    2713             :         }
    2714             : 
    2715           2 :         rc = nvme_ctrlr_identify_ns_iocs_specific_async(ns);
    2716           2 :         if (rc) {
    2717           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2718             :         }
    2719             : 
    2720           2 :         return rc;
    2721             : }
    2722             : 
    2723             : static void
    2724           0 : nvme_ctrlr_identify_ns_zns_specific_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2725             : {
    2726           0 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2727           0 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2728             : 
    2729           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2730           0 :                 nvme_ns_free_zns_specific_data(ns);
    2731           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2732           0 :                 return;
    2733             :         }
    2734             : 
    2735           0 :         nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, ns->id);
    2736             : }
    2737             : 
    2738             : static int
    2739           2 : nvme_ctrlr_identify_ns_zns_specific_async(struct spdk_nvme_ns *ns)
    2740             : {
    2741           2 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2742             :         int rc;
    2743             : 
    2744           2 :         assert(!ns->nsdata_zns);
    2745           2 :         ns->nsdata_zns = spdk_zmalloc(sizeof(*ns->nsdata_zns), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
    2746             :                                       SPDK_MALLOC_SHARE);
    2747           2 :         if (!ns->nsdata_zns) {
    2748           0 :                 return -ENOMEM;
    2749             :         }
    2750             : 
    2751           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
    2752           2 :                              ctrlr->opts.admin_timeout_ms);
    2753           2 :         rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
    2754           2 :                                      ns->nsdata_zns, sizeof(*ns->nsdata_zns),
    2755             :                                      nvme_ctrlr_identify_ns_zns_specific_async_done, ns);
    2756           2 :         if (rc) {
    2757           1 :                 nvme_ns_free_zns_specific_data(ns);
    2758             :         }
    2759             : 
    2760           2 :         return rc;
    2761             : }
    2762             : 
    2763             : static void
    2764           0 : nvme_ctrlr_identify_ns_nvm_specific_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2765             : {
    2766           0 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2767           0 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2768             : 
    2769           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2770           0 :                 nvme_ns_free_nvm_specific_data(ns);
    2771           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2772           0 :                 return;
    2773             :         }
    2774             : 
    2775           0 :         nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, ns->id);
    2776             : }
    2777             : 
    2778             : static int
    2779           0 : nvme_ctrlr_identify_ns_nvm_specific_async(struct spdk_nvme_ns *ns)
    2780             : {
    2781           0 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2782             :         int rc;
    2783             : 
    2784           0 :         assert(!ns->nsdata_nvm);
    2785           0 :         ns->nsdata_nvm = spdk_zmalloc(sizeof(*ns->nsdata_nvm), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
    2786             :                                       SPDK_MALLOC_SHARE);
    2787           0 :         if (!ns->nsdata_nvm) {
    2788           0 :                 return -ENOMEM;
    2789             :         }
    2790             : 
    2791           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
    2792           0 :                              ctrlr->opts.admin_timeout_ms);
    2793           0 :         rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
    2794           0 :                                      ns->nsdata_nvm, sizeof(*ns->nsdata_nvm),
    2795             :                                      nvme_ctrlr_identify_ns_nvm_specific_async_done, ns);
    2796           0 :         if (rc) {
    2797           0 :                 nvme_ns_free_nvm_specific_data(ns);
    2798             :         }
    2799             : 
    2800           0 :         return rc;
    2801             : }
    2802             : 
    2803             : static int
    2804           2 : nvme_ctrlr_identify_ns_iocs_specific_async(struct spdk_nvme_ns *ns)
    2805             : {
    2806           2 :         switch (ns->csi) {
    2807           2 :         case SPDK_NVME_CSI_ZNS:
    2808           2 :                 return nvme_ctrlr_identify_ns_zns_specific_async(ns);
    2809           0 :         case SPDK_NVME_CSI_NVM:
    2810           0 :                 if (ns->ctrlr->cdata.ctratt.bits.elbas) {
    2811           0 :                         return nvme_ctrlr_identify_ns_nvm_specific_async(ns);
    2812             :                 }
    2813             :         /* fallthrough */
    2814             :         default:
    2815             :                 /*
    2816             :                  * This switch must handle all cases for which
    2817             :                  * nvme_ns_has_supported_iocs_specific_data() returns true,
    2818             :                  * other cases should never happen.
    2819             :                  */
    2820           0 :                 assert(0);
    2821             :         }
    2822             : 
    2823             :         return -EINVAL;
    2824             : }
    2825             : 
    2826             : static int
    2827          14 : nvme_ctrlr_identify_namespaces_iocs_specific(struct spdk_nvme_ctrlr *ctrlr)
    2828             : {
    2829          14 :         if (!nvme_ctrlr_multi_iocs_enabled(ctrlr)) {
    2830             :                 /* Multi IOCS not supported/enabled, move on to the next state */
    2831          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2832          14 :                                      ctrlr->opts.admin_timeout_ms);
    2833          14 :                 return 0;
    2834             :         }
    2835             : 
    2836           0 :         return nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, 0);
    2837             : }
    2838             : 
    2839             : static void
    2840           6 : nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2841             : {
    2842           6 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2843           6 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2844             :         uint32_t nsid;
    2845             :         int rc;
    2846             : 
    2847           6 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2848             :                 /*
     2849             :                  * Many controllers claim to be compatible with NVMe 1.3; however,
     2850             :                  * they do not implement the NS ID Desc List. Therefore, instead of setting
    2851             :                  * the state to NVME_CTRLR_STATE_ERROR, silently ignore the completion
    2852             :                  * error and move on to the next state.
    2853             :                  *
    2854             :                  * The proper way is to create a new quirk for controllers that violate
    2855             :                  * the NVMe 1.3 spec by not supporting NS ID Desc List.
    2856             :                  * (Re-using the NVME_QUIRK_IDENTIFY_CNS quirk is not possible, since
    2857             :                  * it is too generic and was added in order to handle controllers that
    2858             :                  * violate the NVMe 1.1 spec by not supporting ACTIVE LIST).
    2859             :                  */
    2860           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2861           0 :                                      ctrlr->opts.admin_timeout_ms);
    2862           0 :                 return;
    2863             :         }
    2864             : 
    2865           6 :         nvme_ns_set_id_desc_list_data(ns);
    2866             : 
    2867             :         /* move on to the next active NS */
    2868           6 :         nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2869           6 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2870           6 :         if (ns == NULL) {
    2871           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2872           2 :                                      ctrlr->opts.admin_timeout_ms);
    2873           2 :                 return;
    2874             :         }
    2875             : 
    2876           4 :         rc = nvme_ctrlr_identify_id_desc_async(ns);
    2877           4 :         if (rc) {
    2878           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2879             :         }
    2880             : }
    2881             : 
    2882             : static int
    2883           6 : nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
    2884             : {
    2885           6 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2886             : 
    2887           6 :         memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
    2888             : 
    2889           6 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
    2890           6 :                              ctrlr->opts.admin_timeout_ms);
    2891          12 :         return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
    2892           6 :                                        0, ns->id, 0, ns->id_desc_list, sizeof(ns->id_desc_list),
    2893             :                                        nvme_ctrlr_identify_id_desc_async_done, ns);
    2894             : }
    2895             : 
    2896             : static int
    2897          14 : nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    2898             : {
    2899             :         uint32_t nsid;
    2900             :         struct spdk_nvme_ns *ns;
    2901             :         int rc;
    2902             : 
    2903          14 :         if ((ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) &&
    2904          12 :              !(ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS)) ||
    2905           2 :             (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
    2906          12 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
    2907             :                 /* NS ID Desc List not supported, move on to the next state */
    2908          12 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2909          12 :                                      ctrlr->opts.admin_timeout_ms);
    2910          12 :                 return 0;
    2911             :         }
    2912             : 
    2913           2 :         nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2914           2 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2915           2 :         if (ns == NULL) {
    2916             :                 /* No active NS, move on to the next state */
    2917           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2918           0 :                                      ctrlr->opts.admin_timeout_ms);
    2919           0 :                 return 0;
    2920             :         }
    2921             : 
    2922           2 :         rc = nvme_ctrlr_identify_id_desc_async(ns);
    2923           2 :         if (rc) {
    2924           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2925             :         }
    2926             : 
    2927           2 :         return rc;
    2928             : }
    2929             : 
    2930             : static void
    2931          19 : nvme_ctrlr_update_nvmf_ioccsz(struct spdk_nvme_ctrlr *ctrlr)
    2932             : {
    2933          19 :         if (spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
    2934           4 :                 if (ctrlr->cdata.nvmf_specific.ioccsz < 4) {
    2935           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Incorrect IOCCSZ %u, the minimum value should be 4\n",
    2936             :                                           ctrlr->cdata.nvmf_specific.ioccsz);
    2937           0 :                         ctrlr->cdata.nvmf_specific.ioccsz = 4;
    2938           0 :                         assert(0);
    2939             :                 }
    2940           4 :                 ctrlr->ioccsz_bytes = ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd);
    2941           4 :                 ctrlr->icdoff = ctrlr->cdata.nvmf_specific.icdoff;
    2942             :         }
    2943          19 : }
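/*
 * Editor's sketch (not part of nvme_ctrlr.c): IOCCSZ in the NVMe-oF
 * controller data is expressed in 16-byte units and covers the whole
 * command capsule, including the 64-byte SQE, which is why the driver
 * subtracts sizeof(struct spdk_nvme_cmd) above.  A standalone version of
 * the same conversion:
 */
static uint32_t
ioccsz_to_in_capsule_data_bytes(uint32_t ioccsz)
{
	assert(ioccsz >= 4);	/* spec minimum: room for one bare 64-byte SQE */
	return ioccsz * 16 - sizeof(struct spdk_nvme_cmd);
}
/* Example: IOCCSZ = 260 allows 260 * 16 - 64 = 4096 bytes of in-capsule data. */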
    2944             : 
    2945             : static void
    2946          19 : nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2947             : {
    2948             :         uint32_t cq_allocated, sq_allocated, min_allocated, i;
    2949          19 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2950             : 
    2951          19 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2952           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set Features - Number of Queues failed!\n");
    2953           0 :                 ctrlr->opts.num_io_queues = 0;
    2954             :         } else {
    2955             :                 /*
    2956             :                  * Data in cdw0 is 0-based.
     2957             :                  * The lower 16 bits hold the number of submission queues allocated.
     2958             :                  * The upper 16 bits hold the number of completion queues allocated.
    2959             :                  */
    2960          19 :                 sq_allocated = (cpl->cdw0 & 0xFFFF) + 1;
    2961          19 :                 cq_allocated = (cpl->cdw0 >> 16) + 1;
    2962             : 
    2963             :                 /*
    2964             :                  * For 1:1 queue mapping, set number of allocated queues to be minimum of
    2965             :                  * submission and completion queues.
    2966             :                  */
    2967          19 :                 min_allocated = spdk_min(sq_allocated, cq_allocated);
    2968             : 
    2969             :                 /* Set number of queues to be minimum of requested and actually allocated. */
    2970          19 :                 ctrlr->opts.num_io_queues = spdk_min(min_allocated, ctrlr->opts.num_io_queues);
    2971             :         }
    2972             : 
    2973          19 :         ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
    2974          19 :         if (ctrlr->free_io_qids == NULL) {
    2975           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2976           0 :                 return;
    2977             :         }
    2978             : 
    2979             :         /* Initialize list of free I/O queue IDs. QID 0 is the admin queue (implicitly allocated). */
    2980          69 :         for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
    2981          50 :                 spdk_nvme_ctrlr_free_qid(ctrlr, i);
    2982             :         }
    2983             : 
    2984          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
    2985          19 :                              ctrlr->opts.admin_timeout_ms);
    2986             : }
    2987             : 
    2988             : static int
    2989          19 : nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
    2990             : {
    2991             :         int rc;
    2992             : 
    2993          19 :         if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
    2994           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "Limiting requested num_io_queues %u to max %d\n",
    2995             :                                      ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
    2996           0 :                 ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
    2997          19 :         } else if (ctrlr->opts.num_io_queues < 1) {
    2998          13 :                 NVME_CTRLR_NOTICELOG(ctrlr, "Requested num_io_queues 0, increasing to 1\n");
    2999          13 :                 ctrlr->opts.num_io_queues = 1;
    3000             :         }
    3001             : 
    3002          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
    3003          19 :                              ctrlr->opts.admin_timeout_ms);
    3004             : 
    3005          19 :         rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
    3006             :                                            nvme_ctrlr_set_num_queues_done, ctrlr);
    3007          19 :         if (rc != 0) {
    3008           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3009           0 :                 return rc;
    3010             :         }
    3011             : 
    3012          19 :         return 0;
    3013             : }
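/*
 * Editor's sketch (not part of nvme_ctrlr.c): the application side of this
 * negotiation.  The requested count is set through the controller options
 * before connecting; nvme_ctrlr_set_num_queues_done() later clamps it to
 * what the controller actually granted.  `trid` is assumed to be an
 * already-initialized transport ID.
 */
struct spdk_nvme_ctrlr_opts opts;
struct spdk_nvme_ctrlr *app_ctrlr;

spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
opts.num_io_queues = 8;		/* request eight I/O queue pairs */
app_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
/* After connect, the granted count can be read back via spdk_nvme_ctrlr_get_opts(). */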
    3014             : 
    3015             : static void
    3016           3 : nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
    3017             : {
    3018             :         uint32_t keep_alive_interval_us;
    3019           3 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    3020             : 
    3021           3 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3022           2 :                 if ((cpl->status.sct == SPDK_NVME_SCT_GENERIC) &&
    3023           2 :                     (cpl->status.sc == SPDK_NVME_SC_INVALID_FIELD)) {
    3024           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Keep alive timeout Get Feature is not supported\n");
    3025             :                 } else {
    3026           1 :                         NVME_CTRLR_ERRLOG(ctrlr, "Keep alive timeout Get Feature failed: SC %x SCT %x\n",
    3027             :                                           cpl->status.sc, cpl->status.sct);
    3028           1 :                         ctrlr->opts.keep_alive_timeout_ms = 0;
    3029           1 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3030           1 :                         return;
    3031             :                 }
    3032             :         } else {
    3033           1 :                 if (ctrlr->opts.keep_alive_timeout_ms != cpl->cdw0) {
    3034           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Controller adjusted keep alive timeout to %u ms\n",
    3035             :                                             cpl->cdw0);
    3036             :                 }
    3037             : 
    3038           1 :                 ctrlr->opts.keep_alive_timeout_ms = cpl->cdw0;
    3039             :         }
    3040             : 
    3041           2 :         if (ctrlr->opts.keep_alive_timeout_ms == 0) {
    3042           0 :                 ctrlr->keep_alive_interval_ticks = 0;
    3043             :         } else {
    3044           2 :                 keep_alive_interval_us = ctrlr->opts.keep_alive_timeout_ms * 1000 / 2;
    3045             : 
    3046           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Sending keep alive every %u us\n", keep_alive_interval_us);
    3047             : 
    3048           2 :                 ctrlr->keep_alive_interval_ticks = (keep_alive_interval_us * spdk_get_ticks_hz()) /
    3049             :                                                    UINT64_C(1000000);
    3050             : 
    3051             :                 /* Schedule the first Keep Alive to be sent as soon as possible. */
    3052           2 :                 ctrlr->next_keep_alive_tick = spdk_get_ticks();
    3053             :         }
    3054             : 
    3055           2 :         if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    3056           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    3057             :         } else {
    3058           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    3059           2 :                                      ctrlr->opts.admin_timeout_ms);
    3060             :         }
    3061             : }
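/*
 * Editor's note (not part of nvme_ctrlr.c): worked example of the interval
 * math above.  Keep alives are sent at half the negotiated timeout.  With
 * keep_alive_timeout_ms = 10000 and spdk_get_ticks_hz() = 2000000000 (a
 * 2 GHz timebase):
 *
 *   keep_alive_interval_us    = 10000 * 1000 / 2 = 5000000 us
 *   keep_alive_interval_ticks = 5000000 * 2000000000 / 1000000
 *                             = 10000000000 ticks  (i.e. every 5 seconds)
 */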
    3062             : 
    3063             : static int
    3064          22 : nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
    3065             : {
    3066             :         int rc;
    3067             : 
    3068          22 :         if (ctrlr->opts.keep_alive_timeout_ms == 0) {
    3069          19 :                 if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    3070           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    3071             :                 } else {
    3072          19 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    3073          19 :                                              ctrlr->opts.admin_timeout_ms);
    3074             :                 }
    3075          19 :                 return 0;
    3076             :         }
    3077             : 
    3078             :         /* Note: Discovery controller identify data does not populate KAS according to spec. */
    3079           3 :         if (!spdk_nvme_ctrlr_is_discovery(ctrlr) && ctrlr->cdata.kas == 0) {
    3080           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Controller KAS is 0 - not enabling Keep Alive\n");
    3081           0 :                 ctrlr->opts.keep_alive_timeout_ms = 0;
    3082           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    3083           0 :                                      ctrlr->opts.admin_timeout_ms);
    3084           0 :                 return 0;
    3085             :         }
    3086             : 
    3087           3 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
    3088           3 :                              ctrlr->opts.admin_timeout_ms);
    3089             : 
    3090             :         /* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
    3091           3 :         rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
    3092             :                                              nvme_ctrlr_set_keep_alive_timeout_done, ctrlr);
    3093           3 :         if (rc != 0) {
    3094           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Keep alive timeout Get Feature failed: %d\n", rc);
    3095           0 :                 ctrlr->opts.keep_alive_timeout_ms = 0;
    3096           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3097           0 :                 return rc;
    3098             :         }
    3099             : 
    3100           3 :         return 0;
    3101             : }
    3102             : 
    3103             : static void
    3104           0 : nvme_ctrlr_set_host_id_done(void *arg, const struct spdk_nvme_cpl *cpl)
    3105             : {
    3106           0 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    3107             : 
    3108           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3109             :                 /*
    3110             :                  * Treat Set Features - Host ID failure as non-fatal, since the Host ID feature
    3111             :                  * is optional.
    3112             :                  */
    3113           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
    3114             :                                    cpl->status.sc, cpl->status.sct);
    3115             :         } else {
    3116           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Set Features - Host ID was successful\n");
    3117             :         }
    3118             : 
    3119           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    3120           0 : }
    3121             : 
    3122             : static int
    3123          14 : nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
    3124             : {
    3125             :         uint8_t *host_id;
    3126             :         uint32_t host_id_size;
    3127             :         int rc;
    3128             : 
    3129          14 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    3130             :                 /*
    3131             :                  * NVMe-oF sends the host ID during Connect and doesn't allow
    3132             :                  * Set Features - Host Identifier after Connect, so we don't need to do anything here.
    3133             :                  */
    3134          14 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "NVMe-oF transport - not sending Set Features - Host ID\n");
    3135          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    3136          14 :                 return 0;
    3137             :         }
    3138             : 
    3139           0 :         if (ctrlr->cdata.ctratt.bits.host_id_exhid_supported) {
    3140           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Using 128-bit extended host identifier\n");
    3141           0 :                 host_id = ctrlr->opts.extended_host_id;
    3142           0 :                 host_id_size = sizeof(ctrlr->opts.extended_host_id);
    3143             :         } else {
    3144           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Using 64-bit host identifier\n");
    3145           0 :                 host_id = ctrlr->opts.host_id;
    3146           0 :                 host_id_size = sizeof(ctrlr->opts.host_id);
    3147             :         }
    3148             : 
    3149             :         /* If the user specified an all-zeroes host identifier, don't send the command. */
    3150           0 :         if (spdk_mem_all_zero(host_id, host_id_size)) {
    3151           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "User did not specify host ID - not sending Set Features - Host ID\n");
    3152           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    3153           0 :                 return 0;
    3154             :         }
    3155             : 
    3156           0 :         SPDK_LOGDUMP(nvme, "host_id", host_id, host_id_size);
    3157             : 
    3158           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
    3159           0 :                              ctrlr->opts.admin_timeout_ms);
    3160             : 
    3161           0 :         rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
    3162           0 :         if (rc != 0) {
    3163           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set Features - Host ID failed: %d\n", rc);
    3164           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3165           0 :                 return rc;
    3166             :         }
    3167             : 
    3168           0 :         return 0;
    3169             : }
    3170             : 
    3171             : void
    3172           4 : nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    3173             : {
    3174             :         uint32_t nsid;
    3175             :         struct spdk_nvme_ns *ns;
    3176             : 
    3177           4 :         for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    3178          19 :              nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
    3179          15 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    3180          15 :                 nvme_ns_construct(ns, nsid, ctrlr);
    3181             :         }
    3182           4 : }
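/*
 * Editor's sketch (not part of nvme_ctrlr.c): the same first/next walk is
 * the public-API idiom for enumerating active namespaces, e.g. after an
 * attach or a namespace-attribute-changed AER:
 */
uint32_t nsid;
struct spdk_nvme_ns *ns;

for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0;
     nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	printf("ns %u: %" PRIu64 " sectors\n", nsid, spdk_nvme_ns_get_num_sectors(ns));
}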
    3183             : 
    3184             : static int
    3185           4 : nvme_ctrlr_clear_changed_ns_log(struct spdk_nvme_ctrlr *ctrlr)
    3186             : {
    3187             :         struct nvme_completion_poll_status      *status;
    3188           4 :         int             rc = -ENOMEM;
    3189           4 :         char            *buffer = NULL;
    3190             :         uint32_t        nsid;
    3191           4 :         size_t          buf_size = (SPDK_NVME_MAX_CHANGED_NAMESPACES * sizeof(uint32_t));
    3192             : 
    3193           4 :         if (ctrlr->opts.disable_read_changed_ns_list_log_page) {
    3194           0 :                 return 0;
    3195             :         }
    3196             : 
    3197           4 :         buffer = spdk_dma_zmalloc(buf_size, 4096, NULL);
    3198           4 :         if (!buffer) {
    3199           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate buffer for getting "
    3200             :                                   "changed ns log.\n");
    3201           0 :                 return rc;
    3202             :         }
    3203             : 
    3204           4 :         status = calloc(1, sizeof(*status));
    3205           4 :         if (!status) {
    3206           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    3207           0 :                 goto free_buffer;
    3208             :         }
    3209             : 
    3210           4 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr,
    3211             :                                               SPDK_NVME_LOG_CHANGED_NS_LIST,
    3212             :                                               SPDK_NVME_GLOBAL_NS_TAG,
    3213             :                                               buffer, buf_size, 0,
    3214             :                                               nvme_completion_poll_cb, status);
    3215             : 
    3216           4 :         if (rc) {
    3217           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_get_log_page() failed: rc=%d\n", rc);
    3218           0 :                 free(status);
    3219           0 :                 goto free_buffer;
    3220             :         }
    3221             : 
    3222           4 :         rc = nvme_wait_for_completion_timeout(ctrlr->adminq, status,
    3223           4 :                                               ctrlr->opts.admin_timeout_ms * 1000);
    3224           4 :         if (!status->timed_out) {
    3225           4 :                 free(status);
    3226             :         }
    3227             : 
    3228           4 :         if (rc) {
    3229           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "wait for spdk_nvme_ctrlr_cmd_get_log_page failed: rc=%d\n", rc);
    3230           0 :                 goto free_buffer;
    3231             :         }
    3232             : 
     3233             :         /* Only check for the overflow case here. */
    3234           4 :         nsid = from_le32(buffer);
    3235           4 :         if (nsid == 0xffffffffu) {
    3236           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "changed ns log overflowed.\n");
    3237             :         }
    3238             : 
    3239           4 : free_buffer:
    3240           4 :         spdk_dma_free(buffer);
    3241           4 :         return rc;
    3242             : }
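/*
 * Editor's sketch (not part of nvme_ctrlr.c): layout of the log page read
 * above.  The Changed Namespace List is an array of up to
 * SPDK_NVME_MAX_CHANGED_NAMESPACES little-endian 32-bit NSIDs; unused
 * trailing entries are zero, and if more namespaces changed than fit, the
 * controller reports a single 0xffffffff entry (the overflow case the
 * driver checks).  A full parse would look roughly like:
 */
static void
parse_changed_ns_list(const char *buffer)
{
	uint32_t i, nsid;

	for (i = 0; i < SPDK_NVME_MAX_CHANGED_NAMESPACES; i++) {
		nsid = from_le32(buffer + i * sizeof(uint32_t));
		if (nsid == 0) {
			break;	/* end of list */
		}
		if (nsid == 0xffffffffu) {
			break;	/* overflow: rescan all namespaces instead */
		}
		/* ... rescan the namespace identified by nsid ... */
	}
}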
    3243             : 
    3244             : static void
    3245           5 : nvme_ctrlr_process_async_event(struct spdk_nvme_ctrlr *ctrlr,
    3246             :                                const struct spdk_nvme_cpl *cpl)
    3247             : {
    3248             :         union spdk_nvme_async_event_completion event;
    3249             :         struct spdk_nvme_ctrlr_process *active_proc;
    3250             :         int rc;
    3251             : 
    3252           5 :         event.raw = cpl->cdw0;
    3253             : 
    3254           5 :         if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
    3255           5 :             (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
    3256           4 :                 nvme_ctrlr_clear_changed_ns_log(ctrlr);
    3257             : 
    3258           4 :                 rc = nvme_ctrlr_identify_active_ns(ctrlr);
    3259           4 :                 if (rc) {
    3260           0 :                         return;
    3261             :                 }
    3262           4 :                 nvme_ctrlr_update_namespaces(ctrlr);
    3263           4 :                 nvme_io_msg_ctrlr_update(ctrlr);
    3264             :         }
    3265             : 
    3266           5 :         if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
    3267           5 :             (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE)) {
    3268           1 :                 if (!ctrlr->opts.disable_read_ana_log_page) {
    3269           1 :                         rc = nvme_ctrlr_update_ana_log_page(ctrlr);
    3270           1 :                         if (rc) {
    3271           0 :                                 return;
    3272             :                         }
    3273           1 :                         nvme_ctrlr_parse_ana_log_page(ctrlr, nvme_ctrlr_update_ns_ana_states,
    3274             :                                                       ctrlr);
    3275             :                 }
    3276             :         }
    3277             : 
    3278           5 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3279           5 :         if (active_proc && active_proc->aer_cb_fn) {
    3280           3 :                 active_proc->aer_cb_fn(active_proc->aer_cb_arg, cpl);
    3281             :         }
    3282             : }
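/*
 * Editor's sketch (not part of nvme_ctrlr.c): aer_cb_fn above is whatever
 * the application registered.  A minimal consumer, relying on the driver
 * having already refreshed namespace state before the callback fires (as
 * the code above guarantees for NS_ATTR_CHANGED):
 */
static void
app_aer_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	union spdk_nvme_async_event_completion event;

	event.raw = cpl->cdw0;
	if (event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE &&
	    event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED) {
		/* Namespace list changed; re-enumerate with the active-NS walk. */
	}
}

/* Registration, typically right after attach: */
spdk_nvme_ctrlr_register_aer_callback(ctrlr, app_aer_cb, NULL);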
    3283             : 
    3284             : static void
    3285           5 : nvme_ctrlr_queue_async_event(struct spdk_nvme_ctrlr *ctrlr,
    3286             :                              const struct spdk_nvme_cpl *cpl)
    3287             : {
    3288             :         struct  spdk_nvme_ctrlr_aer_completion_list *nvme_event;
    3289             :         struct spdk_nvme_ctrlr_process *proc;
    3290             : 
     3291             :         /* Add the async event to each process object's event list */
    3292          10 :         TAILQ_FOREACH(proc, &ctrlr->active_procs, tailq) {
     3293             :                 /* Must be shared memory so other processes can access it */
    3294           5 :                 nvme_event = spdk_zmalloc(sizeof(*nvme_event), 0, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
    3295           5 :                 if (!nvme_event) {
    3296           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Alloc nvme event failed, ignore the event\n");
    3297           0 :                         return;
    3298             :                 }
    3299           5 :                 nvme_event->cpl = *cpl;
    3300             : 
    3301           5 :                 STAILQ_INSERT_TAIL(&proc->async_events, nvme_event, link);
    3302             :         }
    3303             : }
    3304             : 
    3305             : static void
    3306           5 : nvme_ctrlr_complete_queued_async_events(struct spdk_nvme_ctrlr *ctrlr)
    3307             : {
    3308             :         struct  spdk_nvme_ctrlr_aer_completion_list  *nvme_event, *nvme_event_tmp;
    3309             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3310             : 
    3311           5 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3312             : 
    3313          10 :         STAILQ_FOREACH_SAFE(nvme_event, &active_proc->async_events, link, nvme_event_tmp) {
    3314           5 :                 STAILQ_REMOVE(&active_proc->async_events, nvme_event,
    3315             :                               spdk_nvme_ctrlr_aer_completion_list, link);
    3316           5 :                 nvme_ctrlr_process_async_event(ctrlr, &nvme_event->cpl);
    3317           5 :                 spdk_free(nvme_event);
    3318             : 
    3319             :         }
    3320           5 : }
    3321             : 
    3322             : static void
    3323           5 : nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
    3324             : {
    3325           5 :         struct nvme_async_event_request *aer = arg;
    3326           5 :         struct spdk_nvme_ctrlr          *ctrlr = aer->ctrlr;
    3327             : 
    3328           5 :         if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
    3329           5 :             cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
    3330             :                 /*
     3331             :                  *  This is simulated when the controller is being shut down, to
    3332             :                  *  effectively abort outstanding asynchronous event requests
    3333             :                  *  and make sure all memory is freed.  Do not repost the
    3334             :                  *  request in this case.
    3335             :                  */
    3336           0 :                 return;
    3337             :         }
    3338             : 
    3339           5 :         if (cpl->status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
    3340           0 :             cpl->status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED) {
    3341             :                 /*
    3342             :                  *  SPDK will only send as many AERs as the device says it supports,
    3343             :                  *  so this status code indicates an out-of-spec device.  Do not repost
    3344             :                  *  the request in this case.
    3345             :                  */
     3346           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Controller appears out-of-spec for asynchronous event request "
    3347             :                                   "handling.  Do not repost this AER.\n");
    3348           0 :                 return;
    3349             :         }
    3350             : 
    3351             :         /* Add the events to the list */
    3352           5 :         nvme_ctrlr_queue_async_event(ctrlr, cpl);
    3353             : 
     3354             :         /* If the ctrlr was removed or is in the destruct state, do not send the AER again */
    3355           5 :         if (ctrlr->is_removed || ctrlr->is_destructed) {
    3356           0 :                 return;
    3357             :         }
    3358             : 
    3359             :         /*
    3360             :          * Repost another asynchronous event request to replace the one
    3361             :          *  that just completed.
    3362             :          */
    3363           5 :         if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
    3364             :                 /*
    3365             :                  * We can't do anything to recover from a failure here,
     3366             :                  * so just log an error and leave the AER unsubmitted.
    3367             :                  */
    3368           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "resubmitting AER failed!\n");
    3369             :         }
    3370             : }
    3371             : 
    3372             : static int
    3373          24 : nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
    3374             :                                     struct nvme_async_event_request *aer)
    3375             : {
    3376             :         struct nvme_request *req;
    3377             : 
    3378          24 :         aer->ctrlr = ctrlr;
    3379          24 :         req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
    3380          24 :         aer->req = req;
    3381          24 :         if (req == NULL) {
    3382           0 :                 return -1;
    3383             :         }
    3384             : 
    3385          24 :         req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
    3386          24 :         return nvme_ctrlr_submit_admin_request(ctrlr, req);
    3387             : }
    3388             : 
    3389             : static void
    3390          19 : nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
    3391             : {
    3392             :         struct nvme_async_event_request         *aer;
    3393             :         int                                     rc;
    3394             :         uint32_t                                i;
    3395          19 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    3396             : 
    3397          19 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3398           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "nvme_ctrlr_configure_aer failed!\n");
    3399           0 :                 ctrlr->num_aers = 0;
    3400             :         } else {
    3401             :                 /* aerl is a zero-based value, so we need to add 1 here. */
    3402          19 :                 ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
    3403             :         }
    3404             : 
    3405          38 :         for (i = 0; i < ctrlr->num_aers; i++) {
    3406          19 :                 aer = &ctrlr->aer[i];
    3407          19 :                 rc = nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
    3408          19 :                 if (rc) {
    3409           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_construct_and_submit_aer failed!\n");
    3410           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3411           0 :                         return;
    3412             :                 }
    3413             :         }
    3414          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, ctrlr->opts.admin_timeout_ms);
    3415             : }
    3416             : 
    3417             : static int
    3418          19 : nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
    3419             : {
    3420             :         union spdk_nvme_feat_async_event_configuration  config;
    3421             :         int                                             rc;
    3422             : 
    3423          19 :         config.raw = 0;
    3424             : 
    3425          19 :         if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    3426           0 :                 config.bits.discovery_log_change_notice = 1;
    3427             :         } else {
    3428          19 :                 config.bits.crit_warn.bits.available_spare = 1;
    3429          19 :                 config.bits.crit_warn.bits.temperature = 1;
    3430          19 :                 config.bits.crit_warn.bits.device_reliability = 1;
    3431          19 :                 config.bits.crit_warn.bits.read_only = 1;
    3432          19 :                 config.bits.crit_warn.bits.volatile_memory_backup = 1;
    3433             : 
    3434          19 :                 if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 2, 0)) {
    3435           4 :                         if (ctrlr->cdata.oaes.ns_attribute_notices) {
    3436           0 :                                 config.bits.ns_attr_notice = 1;
    3437             :                         }
    3438           4 :                         if (ctrlr->cdata.oaes.fw_activation_notices) {
    3439           0 :                                 config.bits.fw_activation_notice = 1;
    3440             :                         }
    3441           4 :                         if (ctrlr->cdata.oaes.ana_change_notices) {
    3442           0 :                                 config.bits.ana_change_notice = 1;
    3443             :                         }
    3444             :                 }
    3445          19 :                 if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) && ctrlr->cdata.lpa.telemetry) {
    3446           0 :                         config.bits.telemetry_log_notice = 1;
    3447             :                 }
    3448             :         }
    3449             : 
    3450          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
    3451          19 :                              ctrlr->opts.admin_timeout_ms);
    3452             : 
    3453          19 :         rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
    3454             :                         nvme_ctrlr_configure_aer_done,
    3455             :                         ctrlr);
    3456          19 :         if (rc != 0) {
    3457           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3458           0 :                 return rc;
    3459             :         }
    3460             : 
    3461          19 :         return 0;
    3462             : }
    3463             : 
    3464             : struct spdk_nvme_ctrlr_process *
    3465          61 : nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr, pid_t pid)
    3466             : {
    3467             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3468             : 
    3469          61 :         TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
    3470          12 :                 if (active_proc->pid == pid) {
    3471          12 :                         return active_proc;
    3472             :                 }
    3473             :         }
    3474             : 
    3475          49 :         return NULL;
    3476             : }
    3477             : 
    3478             : struct spdk_nvme_ctrlr_process *
    3479          57 : nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr)
    3480             : {
    3481          57 :         return nvme_ctrlr_get_process(ctrlr, getpid());
    3482             : }
    3483             : 
    3484             : /**
    3485             :  * This function will be called when a process is using the controller.
    3486             :  *  1. For the primary process, it is called when constructing the controller.
     3487             :  *  2. For a secondary process, it is called when probing the controller.
     3488             :  * Note: this checks whether the process has already been added.
    3489             :  */
    3490             : int
    3491           4 : nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
    3492             : {
    3493             :         struct spdk_nvme_ctrlr_process  *ctrlr_proc;
    3494           4 :         pid_t                           pid = getpid();
    3495             : 
    3496             :         /* Check whether the process is already added or not */
    3497           4 :         if (nvme_ctrlr_get_process(ctrlr, pid)) {
    3498           0 :                 return 0;
    3499             :         }
    3500             : 
    3501             :         /* Initialize the per process properties for this ctrlr */
    3502           4 :         ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process),
    3503             :                                   64, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
    3504           4 :         if (ctrlr_proc == NULL) {
    3505           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "failed to allocate memory to track the process props\n");
    3506             : 
    3507           0 :                 return -1;
    3508             :         }
    3509             : 
    3510           4 :         ctrlr_proc->is_primary = spdk_process_is_primary();
    3511           4 :         ctrlr_proc->pid = pid;
    3512           4 :         STAILQ_INIT(&ctrlr_proc->active_reqs);
    3513           4 :         ctrlr_proc->devhandle = devhandle;
    3514           4 :         ctrlr_proc->ref = 0;
    3515           4 :         TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
    3516           4 :         STAILQ_INIT(&ctrlr_proc->async_events);
    3517             : 
    3518           4 :         TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
    3519             : 
    3520           4 :         return 0;
    3521             : }
    3522             : 
    3523             : /**
    3524             :  * This function will be called when the process detaches the controller.
    3525             :  * Note: the ctrlr_lock must be held when calling this function.
    3526             :  */
    3527             : static void
    3528           1 : nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
    3529             :                           struct spdk_nvme_ctrlr_process *proc)
    3530             : {
    3531             :         struct spdk_nvme_qpair  *qpair, *tmp_qpair;
    3532             : 
    3533           1 :         assert(STAILQ_EMPTY(&proc->active_reqs));
    3534             : 
    3535           1 :         TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
    3536           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    3537             :         }
    3538             : 
    3539           1 :         TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
    3540             : 
    3541           1 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    3542           1 :                 spdk_pci_device_detach(proc->devhandle);
    3543             :         }
    3544             : 
    3545           1 :         spdk_free(proc);
    3546           1 : }
    3547             : 
    3548             : /**
     3549             :  * This function will be called when a process has exited unexpectedly,
     3550             :  *  in order to free any incomplete nvme requests, allocated I/O qpairs,
    3551             :  *  and allocated memory.
    3552             :  * Note: the ctrlr_lock must be held when calling this function.
    3553             :  */
    3554             : static void
    3555           0 : nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
    3556             : {
    3557             :         struct nvme_request     *req, *tmp_req;
    3558             :         struct spdk_nvme_qpair  *qpair, *tmp_qpair;
    3559             :         struct spdk_nvme_ctrlr_aer_completion_list *event;
    3560             : 
    3561           0 :         STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
    3562           0 :                 STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
    3563             : 
    3564           0 :                 assert(req->pid == proc->pid);
    3565           0 :                 nvme_cleanup_user_req(req);
    3566           0 :                 nvme_free_request(req);
    3567             :         }
    3568             : 
     3569             :         /* Free any async events still queued on this process's event list */
    3570           0 :         while (!STAILQ_EMPTY(&proc->async_events)) {
    3571           0 :                 event = STAILQ_FIRST(&proc->async_events);
    3572           0 :                 STAILQ_REMOVE_HEAD(&proc->async_events, link);
    3573           0 :                 spdk_free(event);
    3574             :         }
    3575             : 
    3576           0 :         TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
    3577           0 :                 TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);
    3578             : 
    3579             :                 /*
    3580             :                  * The process may have been killed while some qpairs were in their
    3581             :                  *  completion context.  Clear that flag here to allow these IO
    3582             :                  *  qpairs to be deleted.
    3583             :                  */
    3584           0 :                 qpair->in_completion_context = 0;
    3585             : 
    3586           0 :                 qpair->no_deletion_notification_needed = 1;
    3587             : 
    3588           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    3589             :         }
    3590             : 
    3591           0 :         spdk_free(proc);
    3592           0 : }
    3593             : 
    3594             : /**
    3595             :  * This function will be called when destructing the controller.
     3596             :  *  1. There are no more admin requests on this controller.
     3597             :  *  2. Clean up any leftover resource allocations whose associated process is gone.
    3598             :  */
    3599             : void
    3600          50 : nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
    3601             : {
    3602             :         struct spdk_nvme_ctrlr_process  *active_proc, *tmp;
    3603             : 
     3604             :         /* Free all the processes' properties and make sure there are no pending admin IOs */
    3605          53 :         TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
    3606           3 :                 TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
    3607             : 
    3608           3 :                 assert(STAILQ_EMPTY(&active_proc->active_reqs));
    3609             : 
    3610           3 :                 spdk_free(active_proc);
    3611             :         }
    3612          50 : }
    3613             : 
    3614             : /**
    3615             :  * This function will be called when any other process attaches or
     3616             :  *  detaches the controller, in order to clean up any unexpectedly
    3617             :  *  terminated processes.
    3618             :  * Note: the ctrlr_lock must be held when calling this function.
    3619             :  */
    3620             : static int
    3621           0 : nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
    3622             : {
    3623             :         struct spdk_nvme_ctrlr_process  *active_proc, *tmp;
    3624           0 :         int                             active_proc_count = 0;
    3625             : 
    3626           0 :         TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
    3627           0 :                 if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
     3628           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "process %d terminated unexpectedly\n", active_proc->pid);
    3629             : 
    3630           0 :                         TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
    3631             : 
    3632           0 :                         nvme_ctrlr_cleanup_process(active_proc);
    3633             :                 } else {
    3634           0 :                         active_proc_count++;
    3635             :                 }
    3636             :         }
    3637             : 
    3638           0 :         return active_proc_count;
    3639             : }
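/*
 * Editor's sketch (not part of nvme_ctrlr.c): kill(pid, 0) sends no signal;
 * it only runs the existence/permission checks, so -1 with errno == ESRCH is
 * a portable "this pid is gone" probe (signal.h and errno.h arrive via
 * spdk/stdinc.h).  As a standalone helper:
 */
static bool
pid_is_alive(pid_t pid)
{
	/* EPERM still means the process exists, just owned by someone else. */
	return !(kill(pid, 0) == -1 && errno == ESRCH);
}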
    3640             : 
    3641             : void
    3642           0 : nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
    3643             : {
    3644             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3645             : 
    3646           0 :         nvme_ctrlr_lock(ctrlr);
    3647             : 
    3648           0 :         nvme_ctrlr_remove_inactive_proc(ctrlr);
    3649             : 
    3650           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3651           0 :         if (active_proc) {
    3652           0 :                 active_proc->ref++;
    3653             :         }
    3654             : 
    3655           0 :         nvme_ctrlr_unlock(ctrlr);
    3656           0 : }
    3657             : 
    3658             : void
    3659           0 : nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
    3660             : {
    3661             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3662             :         int                             proc_count;
    3663             : 
    3664           0 :         nvme_ctrlr_lock(ctrlr);
    3665             : 
    3666           0 :         proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);
    3667             : 
    3668           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3669           0 :         if (active_proc) {
    3670           0 :                 active_proc->ref--;
    3671           0 :                 assert(active_proc->ref >= 0);
    3672             : 
    3673             :                 /*
    3674             :                  * The last active process will be removed at the end of
    3675             :                  * the destruction of the controller.
    3676             :                  */
    3677           0 :                 if (active_proc->ref == 0 && proc_count != 1) {
    3678           0 :                         nvme_ctrlr_remove_process(ctrlr, active_proc);
    3679             :                 }
    3680             :         }
    3681             : 
    3682           0 :         nvme_ctrlr_unlock(ctrlr);
    3683           0 : }
    3684             : 
    3685             : int
    3686           0 : nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
    3687             : {
    3688             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3689           0 :         int                             ref = 0;
    3690             : 
    3691           0 :         nvme_ctrlr_lock(ctrlr);
    3692             : 
    3693           0 :         nvme_ctrlr_remove_inactive_proc(ctrlr);
    3694             : 
    3695           0 :         TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
    3696           0 :                 ref += active_proc->ref;
    3697             :         }
    3698             : 
    3699           0 :         nvme_ctrlr_unlock(ctrlr);
    3700             : 
    3701           0 :         return ref;
    3702             : }
    3703             : 
    3704             : /**
    3705             :  *  Get the PCI device handle which is only visible to its associated process.
    3706             :  */
    3707             : struct spdk_pci_device *
    3708           0 : nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
    3709             : {
    3710             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3711           0 :         struct spdk_pci_device          *devhandle = NULL;
    3712             : 
    3713           0 :         nvme_ctrlr_lock(ctrlr);
    3714             : 
    3715           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3716           0 :         if (active_proc) {
    3717           0 :                 devhandle = active_proc->devhandle;
    3718             :         }
    3719             : 
    3720           0 :         nvme_ctrlr_unlock(ctrlr);
    3721             : 
    3722           0 :         return devhandle;
    3723             : }
    3724             : 
    3725             : static void
    3726          21 : nvme_ctrlr_process_init_vs_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3727             : {
    3728          21 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3729             : 
    3730          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3731           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the VS register\n");
    3732           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3733           0 :                 return;
    3734             :         }
    3735             : 
    3736          21 :         assert(value <= UINT32_MAX);
    3737          21 :         ctrlr->vs.raw = (uint32_t)value;
    3738          21 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_CAP, NVME_TIMEOUT_INFINITE);
    3739             : }
    3740             : 
    3741             : static void
    3742          21 : nvme_ctrlr_process_init_cap_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3743             : {
    3744          21 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3745             : 
    3746          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3747           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CAP register\n");
    3748           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3749           0 :                 return;
    3750             :         }
    3751             : 
    3752          21 :         ctrlr->cap.raw = value;
    3753          21 :         nvme_ctrlr_init_cap(ctrlr);
    3754          21 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN, NVME_TIMEOUT_INFINITE);
    3755             : }
    3756             : 
    3757             : static void
    3758          22 : nvme_ctrlr_process_init_check_en(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3759             : {
    3760          22 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3761             :         enum nvme_ctrlr_state state;
    3762             : 
    3763          22 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3764           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    3765           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3766           0 :                 return;
    3767             :         }
    3768             : 
    3769          22 :         assert(value <= UINT32_MAX);
    3770          22 :         ctrlr->process_init_cc.raw = (uint32_t)value;
    3771             : 
    3772          22 :         if (ctrlr->process_init_cc.bits.en) {
    3773           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1\n");
    3774           2 :                 state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1;
    3775             :         } else {
    3776          20 :                 state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0;
    3777             :         }
    3778             : 
    3779          22 :         nvme_ctrlr_set_state(ctrlr, state, nvme_ctrlr_get_ready_timeout(ctrlr));
    3780             : }
    3781             : 
    3782             : static void
    3783           2 : nvme_ctrlr_process_init_set_en_0(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3784             : {
    3785           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3786             : 
    3787           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3788           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write the CC register\n");
    3789           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3790           0 :                 return;
    3791             :         }
    3792             : 
    3793             :         /*
    3794             :          * Wait 2.5 seconds before accessing PCI registers.
     3795             :          * Not using sleep() to avoid blocking other controllers' initialization.
    3796             :          */
    3797           2 :         if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
    3798           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Applying quirk: delay 2.5 seconds before reading registers\n");
    3799           0 :                 ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2500 * spdk_get_ticks_hz() / 1000);
    3800             :         }
    3801             : 
    3802           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3803             :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    3804             : }
    3805             : 
    3806             : static void
    3807           2 : nvme_ctrlr_process_init_set_en_0_read_cc(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3808             : {
    3809           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3810             :         union spdk_nvme_cc_register cc;
    3811             :         int rc;
    3812             : 
    3813           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3814           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    3815           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3816           0 :                 return;
    3817             :         }
    3818             : 
    3819           2 :         assert(value <= UINT32_MAX);
    3820           2 :         cc.raw = (uint32_t)value;
    3821           2 :         cc.bits.en = 0;
    3822           2 :         ctrlr->process_init_cc.raw = cc.raw;
    3823             : 
    3824           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC,
    3825             :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    3826             : 
    3827           2 :         rc = nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_process_init_set_en_0, ctrlr);
    3828           2 :         if (rc != 0) {
    3829           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_cc() failed\n");
    3830           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3831             :         }
    3832             : }
    3833             : 
    3834             : static void
    3835           2 : nvme_ctrlr_process_init_wait_for_ready_1(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3836             : {
    3837           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3838             :         union spdk_nvme_csts_register csts;
    3839             : 
    3840           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3841             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3842             :                  * temporarily. Allow for this case.
    3843             :                  */
    3844           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3845           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3846           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
    3847             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3848             :                 } else {
    3849           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3850           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3851             :                 }
    3852             : 
    3853           0 :                 return;
    3854             :         }
    3855             : 
    3856           2 :         assert(value <= UINT32_MAX);
    3857           2 :         csts.raw = (uint32_t)value;
    3858           2 :         if (csts.bits.rdy == 1 || csts.bits.cfs == 1) {
    3859           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0,
    3860             :                                      nvme_ctrlr_get_ready_timeout(ctrlr));
    3861             :         } else {
     3862           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1 && CSTS.RDY = 0 - waiting for CSTS.RDY = 1 before disabling\n");
    3863           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
    3864             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3865             :         }
    3866             : }
    3867             : 
    3868             : static void
    3869          22 : nvme_ctrlr_process_init_wait_for_ready_0(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3870             : {
    3871          22 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3872             :         union spdk_nvme_csts_register csts;
    3873             : 
    3874          22 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3875             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3876             :                  * temporarily. Allow for this case.
    3877             :                  */
    3878           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3879           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3880           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3881             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3882             :                 } else {
    3883           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3884           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3885             :                 }
    3886             : 
    3887           0 :                 return;
    3888             :         }
    3889             : 
    3890          22 :         assert(value <= UINT32_MAX);
    3891          22 :         csts.raw = (uint32_t)value;
    3892          22 :         if (csts.bits.rdy == 0) {
    3893          22 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 0 && CSTS.RDY = 0\n");
    3894          22 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLED,
    3895             :                                      nvme_ctrlr_get_ready_timeout(ctrlr));
    3896             :         } else {
    3897           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3898             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3899             :         }
    3900             : }
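The two CSTS callbacks above implement the disable half of the NVMe EN/RDY handshake: with EN = 1 the driver must see RDY = 1 (or CSTS.CFS, omitted below) before clearing EN, and with EN = 0 it must wait for RDY to drop before re-enabling. A purely illustrative distillation of that decision (none of these names exist in SPDK):

#include <stdbool.h>

enum disable_step { WAIT_AGAIN, CLEAR_EN, PROCEED_TO_ENABLE };

static enum disable_step
disable_phase_next(bool en, bool rdy)
{
        if (en) {
                /* EN = 1: RDY must reach 1 before it is safe to clear EN. */
                return rdy ? CLEAR_EN : WAIT_AGAIN;
        }
        /* EN = 0: wait for RDY to drop; then the controller is disabled. */
        return rdy ? WAIT_AGAIN : PROCEED_TO_ENABLE;
}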
    3901             : 
    3902             : static void
    3903           9 : nvme_ctrlr_process_init_enable_wait_for_ready_1(void *ctx, uint64_t value,
    3904             :                 const struct spdk_nvme_cpl *cpl)
    3905             : {
    3906           9 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3907             :         union spdk_nvme_csts_register csts;
    3908             : 
    3909           9 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3910             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3911             :                  * temporarily. Allow for this case.
    3912             :                  */
    3913           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3914           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3915           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    3916             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3917             :                 } else {
    3918           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3919           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3920             :                 }
    3921             : 
    3922           0 :                 return;
    3923             :         }
    3924             : 
    3925           9 :         assert(value <= UINT32_MAX);
     3926           9 :         csts.raw = (uint32_t)value;
    3927           9 :         if (csts.bits.rdy == 1) {
    3928           9 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n");
    3929             :                 /*
    3930             :                  * The controller has been enabled.
    3931             :                  *  Perform the rest of initialization serially.
    3932             :                  */
    3933           9 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
    3934           9 :                                      ctrlr->opts.admin_timeout_ms);
    3935             :         } else {
    3936           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    3937             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3938             :         }
    3939             : }
    3940             : 
    3941             : /**
    3942             :  * This function will be called repeatedly during initialization until the controller is ready.
    3943             :  */
    3944             : int
    3945         446 : nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
    3946             : {
    3947             :         uint32_t ready_timeout_in_ms;
    3948             :         uint64_t ticks;
    3949         446 :         int rc = 0;
    3950             : 
    3951         446 :         ticks = spdk_get_ticks();
    3952             : 
    3953             :         /*
     3954             :          * We may need to avoid accessing any register on the target controller
     3955             :          * for a while; return early without touching the FSM. The
     3956             :          * sleep_timeout_tsc > 0 check lets unit tests leave the timeout unset.
    3957             :          */
    3958         446 :         if ((ctrlr->sleep_timeout_tsc > 0) &&
    3959           2 :             (ticks <= ctrlr->sleep_timeout_tsc)) {
    3960           1 :                 return 0;
    3961             :         }
    3962         445 :         ctrlr->sleep_timeout_tsc = 0;
    3963             : 
    3964         445 :         ready_timeout_in_ms = nvme_ctrlr_get_ready_timeout(ctrlr);
    3965             : 
    3966             :         /*
    3967             :          * Check if the current initialization step is done or has timed out.
    3968             :          */
    3969         445 :         switch (ctrlr->state) {
    3970           1 :         case NVME_CTRLR_STATE_INIT_DELAY:
    3971           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, ready_timeout_in_ms);
    3972           1 :                 if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_INIT) {
    3973             :                         /*
    3974             :                          * Controller may need some delay before it's enabled.
    3975             :                          *
    3976             :                          * This is a workaround for an issue where the PCIe-attached NVMe controller
    3977             :                          * is not ready after VFIO reset. We delay the initialization rather than the
    3978             :                          * enabling itself, because this is required only for the very first enabling
    3979             :                          * - directly after a VFIO reset.
    3980             :                          */
    3981           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Adding 2 second delay before initializing the controller\n");
    3982           1 :                         ctrlr->sleep_timeout_tsc = ticks + (2000 * spdk_get_ticks_hz() / 1000);
    3983             :                 }
    3984           1 :                 break;
    3985             : 
    3986           0 :         case NVME_CTRLR_STATE_DISCONNECTED:
    3987           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    3988           0 :                 break;
    3989             : 
    3990          21 :         case NVME_CTRLR_STATE_CONNECT_ADMINQ: /* synonymous with NVME_CTRLR_STATE_INIT and NVME_CTRLR_STATE_DISCONNECTED */
    3991          21 :                 rc = nvme_transport_ctrlr_connect_qpair(ctrlr, ctrlr->adminq);
    3992          21 :                 if (rc == 0) {
    3993          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ,
    3994             :                                              NVME_TIMEOUT_INFINITE);
    3995             :                 } else {
    3996           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3997             :                 }
    3998          21 :                 break;
    3999             : 
    4000          21 :         case NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ:
    4001          21 :                 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4002             : 
    4003          21 :                 switch (nvme_qpair_get_state(ctrlr->adminq)) {
    4004           0 :                 case NVME_QPAIR_CONNECTING:
    4005           0 :                         if (ctrlr->is_failed) {
    4006           0 :                                 nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    4007           0 :                                 break;
    4008             :                         }
    4009             : 
    4010           0 :                         break;
    4011          21 :                 case NVME_QPAIR_CONNECTED:
    4012          21 :                         nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_ENABLED);
    4013             :                 /* Fall through */
    4014          21 :                 case NVME_QPAIR_ENABLED:
    4015          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_VS,
    4016             :                                              NVME_TIMEOUT_INFINITE);
     4017             :                         /* Abort any requests that were queued while the adminq was connecting,
     4018             :                          * to avoid stalling the init process during a reset: requests are not
     4019             :                          * resubmitted while the controller is resetting, so subsequent commands
     4020             :                          * would simply pile up behind them.
    4021             :                          */
    4022          21 :                         nvme_qpair_abort_queued_reqs(ctrlr->adminq);
    4023          21 :                         break;
    4024           0 :                 case NVME_QPAIR_DISCONNECTING:
    4025           0 :                         assert(ctrlr->adminq->async == true);
    4026           0 :                         break;
    4027           0 :                 case NVME_QPAIR_DISCONNECTED:
    4028             :                 /* fallthrough */
    4029             :                 default:
    4030           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    4031           0 :                         break;
    4032             :                 }
    4033             : 
    4034          21 :                 break;
    4035             : 
    4036          21 :         case NVME_CTRLR_STATE_READ_VS:
    4037          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS, NVME_TIMEOUT_INFINITE);
    4038          21 :                 rc = nvme_ctrlr_get_vs_async(ctrlr, nvme_ctrlr_process_init_vs_done, ctrlr);
    4039          21 :                 break;
    4040             : 
    4041          21 :         case NVME_CTRLR_STATE_READ_CAP:
    4042          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP, NVME_TIMEOUT_INFINITE);
    4043          21 :                 rc = nvme_ctrlr_get_cap_async(ctrlr, nvme_ctrlr_process_init_cap_done, ctrlr);
    4044          21 :                 break;
    4045             : 
    4046          22 :         case NVME_CTRLR_STATE_CHECK_EN:
    4047             :                 /* Begin the hardware initialization by making sure the controller is disabled. */
    4048          22 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC, ready_timeout_in_ms);
    4049          22 :                 rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_process_init_check_en, ctrlr);
    4050          22 :                 break;
    4051             : 
    4052           2 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
    4053             :                 /*
    4054             :                  * Controller is currently enabled. We need to disable it to cause a reset.
    4055             :                  *
    4056             :                  * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
    4057             :                  *  Wait for the ready bit to be 1 before disabling the controller.
    4058             :                  */
    4059           2 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
    4060             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    4061           2 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_wait_for_ready_1, ctrlr);
    4062           2 :                 break;
    4063             : 
    4064           2 :         case NVME_CTRLR_STATE_SET_EN_0:
    4065           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Setting CC.EN = 0\n");
    4066           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC, ready_timeout_in_ms);
    4067           2 :                 rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_process_init_set_en_0_read_cc, ctrlr);
    4068           2 :                 break;
    4069             : 
    4070          22 :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
    4071          22 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS,
    4072             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    4073          22 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_wait_for_ready_0, ctrlr);
    4074          22 :                 break;
    4075             : 
    4076          21 :         case NVME_CTRLR_STATE_DISABLED:
    4077          21 :                 if (ctrlr->is_disconnecting) {
    4078           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Ctrlr was disabled.\n");
    4079             :                 } else {
    4080          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms);
    4081             : 
    4082             :                         /*
    4083             :                          * Delay 100us before setting CC.EN = 1.  Some NVMe SSDs miss CC.EN getting
    4084             :                          *  set to 1 if it is too soon after CSTS.RDY is reported as 0.
    4085             :                          */
    4086          21 :                         spdk_delay_us(100);
    4087             :                 }
    4088          21 :                 break;
    4089             : 
    4090          21 :         case NVME_CTRLR_STATE_ENABLE:
    4091          21 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Setting CC.EN = 1\n");
    4092          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC, ready_timeout_in_ms);
    4093          21 :                 rc = nvme_ctrlr_enable(ctrlr);
    4094          21 :                 if (rc) {
     4095           7 :                         NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr enable failed with error: %d\n", rc);
    4096             :                 }
    4097          21 :                 return rc;
    4098             : 
    4099           9 :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
    4100           9 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
    4101             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    4102           9 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_enable_wait_for_ready_1,
    4103             :                                                ctrlr);
    4104           9 :                 break;
    4105             : 
    4106           9 :         case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
    4107           9 :                 nvme_transport_qpair_reset(ctrlr->adminq);
    4108           9 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY, NVME_TIMEOUT_INFINITE);
    4109           9 :                 break;
    4110             : 
    4111          16 :         case NVME_CTRLR_STATE_IDENTIFY:
    4112          16 :                 rc = nvme_ctrlr_identify(ctrlr);
    4113          16 :                 break;
    4114             : 
    4115          19 :         case NVME_CTRLR_STATE_CONFIGURE_AER:
    4116          19 :                 rc = nvme_ctrlr_configure_aer(ctrlr);
    4117          19 :                 break;
    4118             : 
    4119          22 :         case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
    4120          22 :                 rc = nvme_ctrlr_set_keep_alive_timeout(ctrlr);
    4121          22 :                 break;
    4122             : 
    4123          19 :         case NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC:
    4124          19 :                 rc = nvme_ctrlr_identify_iocs_specific(ctrlr);
    4125          19 :                 break;
    4126             : 
    4127           0 :         case NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG:
    4128           0 :                 rc = nvme_ctrlr_get_zns_cmd_and_effects_log(ctrlr);
    4129           0 :                 break;
    4130             : 
    4131          19 :         case NVME_CTRLR_STATE_SET_NUM_QUEUES:
    4132          19 :                 nvme_ctrlr_update_nvmf_ioccsz(ctrlr);
    4133          19 :                 rc = nvme_ctrlr_set_num_queues(ctrlr);
    4134          19 :                 break;
    4135             : 
    4136          24 :         case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
    4137          24 :                 _nvme_ctrlr_identify_active_ns(ctrlr);
    4138          24 :                 break;
    4139             : 
    4140          14 :         case NVME_CTRLR_STATE_IDENTIFY_NS:
    4141          14 :                 rc = nvme_ctrlr_identify_namespaces(ctrlr);
    4142          14 :                 break;
    4143             : 
    4144          14 :         case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
    4145          14 :                 rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
    4146          14 :                 break;
    4147             : 
    4148          14 :         case NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC:
    4149          14 :                 rc = nvme_ctrlr_identify_namespaces_iocs_specific(ctrlr);
    4150          14 :                 break;
    4151             : 
    4152          15 :         case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
    4153          15 :                 rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
    4154          15 :                 break;
    4155             : 
    4156           1 :         case NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES:
    4157           1 :                 rc = nvme_ctrlr_set_intel_support_log_pages(ctrlr);
    4158           1 :                 break;
    4159             : 
    4160          14 :         case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
    4161          14 :                 nvme_ctrlr_set_supported_features(ctrlr);
    4162          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_FEATURE,
    4163          14 :                                      ctrlr->opts.admin_timeout_ms);
    4164          14 :                 break;
    4165             : 
    4166          16 :         case NVME_CTRLR_STATE_SET_HOST_FEATURE:
    4167          16 :                 rc = nvme_ctrlr_set_host_feature(ctrlr);
    4168          16 :                 break;
    4169             : 
    4170          14 :         case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
    4171          14 :                 rc = nvme_ctrlr_set_doorbell_buffer_config(ctrlr);
    4172          14 :                 break;
    4173             : 
    4174          14 :         case NVME_CTRLR_STATE_SET_HOST_ID:
    4175          14 :                 rc = nvme_ctrlr_set_host_id(ctrlr);
    4176          14 :                 break;
    4177             : 
    4178          17 :         case NVME_CTRLR_STATE_TRANSPORT_READY:
    4179          17 :                 rc = nvme_transport_ctrlr_ready(ctrlr);
    4180          17 :                 if (rc) {
    4181           1 :                         NVME_CTRLR_ERRLOG(ctrlr, "Transport controller ready step failed: rc %d\n", rc);
    4182           1 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    4183             :                 } else {
    4184          16 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    4185             :                 }
    4186          17 :                 break;
    4187             : 
    4188           0 :         case NVME_CTRLR_STATE_READY:
    4189           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Ctrlr already in ready state\n");
    4190           0 :                 return 0;
    4191             : 
    4192           0 :         case NVME_CTRLR_STATE_ERROR:
    4193           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr is in error state\n");
    4194           0 :                 return -1;
    4195             : 
    4196           0 :         case NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS:
    4197             :         case NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP:
    4198             :         case NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC:
    4199             :         case NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC:
    4200             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    4201             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS:
    4202             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC:
    4203             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    4204             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
    4205             :         case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
    4206             :         case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
    4207             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC:
    4208             :         case NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG:
    4209             :         case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
    4210             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
    4211             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
    4212             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
    4213             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC:
    4214             :         case NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES:
    4215             :         case NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE:
    4216             :         case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
    4217             :         case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
    4218             :                 /*
    4219             :                  * nvme_ctrlr_process_init() may be called from the completion context
    4220             :                  * for the admin qpair. Avoid recursive calls for this case.
    4221             :                  */
    4222           0 :                 if (!ctrlr->adminq->in_completion_context) {
    4223           0 :                         spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4224             :                 }
    4225           0 :                 break;
    4226             : 
    4227           0 :         default:
    4228           0 :                 assert(0);
    4229             :                 return -1;
    4230             :         }
    4231             : 
    4232         424 :         if (rc) {
    4233           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr operation failed with error: %d, ctrlr state: %d (%s)\n",
    4234             :                                   rc, ctrlr->state, nvme_ctrlr_state_string(ctrlr->state));
    4235             :         }
    4236             : 
    4237             :         /* Note: we use the ticks captured when we entered this function.
    4238             :          * This covers environments where the SPDK process gets swapped out after
    4239             :          * we tried to advance the state but before we check the timeout here.
    4240             :          * It is not normal for this to happen, but harmless to handle it in this
    4241             :          * way.
    4242             :          */
    4243         424 :         if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
    4244           0 :             ticks > ctrlr->state_timeout_tsc) {
    4245           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Initialization timed out in state %d (%s)\n",
    4246             :                                   ctrlr->state, nvme_ctrlr_state_string(ctrlr->state));
    4247           0 :                 return -1;
    4248             :         }
    4249             : 
    4250         424 :         return rc;
    4251             : }
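Callers drive this FSM by polling it repeatedly; a minimal sketch of the loop, modeled on how nvme_ctrlr_poll_internal() in nvme.c uses it (the function name below is hypothetical and error handling is elided):

static int
poll_init_until_ready(struct spdk_nvme_ctrlr *ctrlr)
{
        int rc;

        while (ctrlr->state != NVME_CTRLR_STATE_READY) {
                rc = nvme_ctrlr_process_init(ctrlr);
                if (rc != 0) {
                        return rc;  /* FSM reported an error; caller cleans up */
                }
        }
        return 0;
}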
    4252             : 
    4253             : int
    4254          47 : nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx)
    4255             : {
    4256          47 :         pthread_mutexattr_t attr;
    4257          47 :         int rc = 0;
    4258             : 
    4259          47 :         if (pthread_mutexattr_init(&attr)) {
    4260           0 :                 return -1;
    4261             :         }
    4262          94 :         if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
    4263             : #ifndef __FreeBSD__
    4264          94 :             pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
    4265          94 :             pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
    4266             : #endif
    4267          47 :             pthread_mutex_init(mtx, &attr)) {
    4268           0 :                 rc = -1;
    4269             :         }
    4270          47 :         pthread_mutexattr_destroy(&attr);
    4271          47 :         return rc;
    4272             : }
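The robust attribute matters because this lock lives in shared memory: if a process dies while holding it, the next locker receives EOWNERDEAD instead of deadlocking. A standalone POSIX example (not SPDK code) of recovering such a mutex:

#include <pthread.h>
#include <errno.h>

static int
lock_robust(pthread_mutex_t *mtx)
{
        int rc = pthread_mutex_lock(mtx);

        if (rc == EOWNERDEAD) {
                /* The previous owner died; mark the mutex usable again. */
                pthread_mutex_consistent(mtx);
                rc = 0;
        }
        return rc;
}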
    4273             : 
    4274             : int
    4275          47 : nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
    4276             : {
    4277             :         int rc;
    4278             : 
    4279          47 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    4280           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT_DELAY, NVME_TIMEOUT_INFINITE);
    4281             :         } else {
    4282          46 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    4283             :         }
    4284             : 
    4285          47 :         if (ctrlr->opts.admin_queue_size > SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES) {
     4286           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "admin_queue_size %u exceeds the max defined by the NVMe spec, using the max value\n",
    4287             :                                   ctrlr->opts.admin_queue_size);
    4288           0 :                 ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES;
    4289             :         }
    4290             : 
    4291          47 :         if (ctrlr->quirks & NVME_QUIRK_MINIMUM_ADMIN_QUEUE_SIZE &&
    4292           0 :             (ctrlr->opts.admin_queue_size % SPDK_NVME_ADMIN_QUEUE_QUIRK_ENTRIES_MULTIPLE) != 0) {
    4293           0 :                 NVME_CTRLR_ERRLOG(ctrlr,
     4294             :                                   "admin_queue_size %u is invalid for this NVMe device, adjusting to the next multiple\n",
    4295             :                                   ctrlr->opts.admin_queue_size);
    4296           0 :                 ctrlr->opts.admin_queue_size = SPDK_ALIGN_CEIL(ctrlr->opts.admin_queue_size,
    4297             :                                                SPDK_NVME_ADMIN_QUEUE_QUIRK_ENTRIES_MULTIPLE);
    4298             :         }
    4299             : 
    4300          47 :         if (ctrlr->opts.admin_queue_size < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES) {
    4301          26 :                 NVME_CTRLR_ERRLOG(ctrlr,
     4302             :                                   "admin_queue_size %u is less than the minimum defined by the NVMe spec, using the min value\n",
    4303             :                                   ctrlr->opts.admin_queue_size);
    4304          26 :                 ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES;
    4305             :         }
    4306             : 
    4307          47 :         ctrlr->flags = 0;
    4308          47 :         ctrlr->free_io_qids = NULL;
    4309          47 :         ctrlr->is_resetting = false;
    4310          47 :         ctrlr->is_failed = false;
    4311          47 :         ctrlr->is_destructed = false;
    4312             : 
    4313          47 :         TAILQ_INIT(&ctrlr->active_io_qpairs);
    4314          47 :         STAILQ_INIT(&ctrlr->queued_aborts);
    4315          47 :         ctrlr->outstanding_aborts = 0;
    4316             : 
    4317          47 :         ctrlr->ana_log_page = NULL;
    4318          47 :         ctrlr->ana_log_page_size = 0;
    4319             : 
    4320          47 :         rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
    4321          47 :         if (rc != 0) {
    4322           0 :                 return rc;
    4323             :         }
    4324             : 
    4325          47 :         TAILQ_INIT(&ctrlr->active_procs);
    4326          47 :         STAILQ_INIT(&ctrlr->register_operations);
    4327             : 
    4328          47 :         RB_INIT(&ctrlr->ns);
    4329             : 
    4330          47 :         return rc;
    4331             : }
    4332             : 
    4333             : static void
    4334          21 : nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr)
    4335             : {
    4336          21 :         if (ctrlr->cap.bits.ams & SPDK_NVME_CAP_AMS_WRR) {
    4337           5 :                 ctrlr->flags |= SPDK_NVME_CTRLR_WRR_SUPPORTED;
    4338             :         }
    4339             : 
    4340          21 :         ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
    4341             : 
    4342             :         /* For now, always select page_size == min_page_size. */
    4343          21 :         ctrlr->page_size = ctrlr->min_page_size;
    4344             : 
    4345          21 :         ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
    4346          21 :         ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
    4347          21 :         if (ctrlr->quirks & NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE &&
    4348           0 :             ctrlr->opts.io_queue_size == DEFAULT_IO_QUEUE_SIZE) {
     4349             :                 /* If the user specifically set an IO queue size different from the
     4350             :                  * default, use that value.  Otherwise overwrite with the quirked value.
    4351             :                  * This allows this quirk to be overridden when necessary.
    4352             :                  * However, cap.mqes still needs to be respected.
    4353             :                  */
    4354           0 :                 ctrlr->opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK;
    4355             :         }
    4356          21 :         ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
    4357             : 
    4358          21 :         ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
    4359          21 : }
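Worked example of the CAP-derived sizing above: CAP.MPSMIN is a power-of-two exponent above 4 KiB and CAP.MQES is a 0's-based queue depth, so MPSMIN = 0 gives 4096-byte pages and MQES = 255 allows up to 256 entries per I/O queue. A small illustrative helper (names are not SPDK API):

#include <stdint.h>

static void
cap_example(uint8_t mpsmin, uint16_t mqes,
            uint32_t *page_size, uint32_t *max_queue_size)
{
        *page_size = 1u << (12 + mpsmin);       /* mpsmin = 0 -> 4096 */
        *max_queue_size = (uint32_t)mqes + 1u;  /* mqes is 0's based */
}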
    4360             : 
    4361             : void
    4362          47 : nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
    4363             : {
    4364             :         int rc;
    4365             : 
    4366          47 :         if (ctrlr->lock_depth > 0) {
    4367           0 :                 SPDK_ERRLOG("lock currently held (depth=%d)!\n", ctrlr->lock_depth);
    4368           0 :                 assert(false);
    4369             :         }
    4370             : 
    4371          47 :         rc = pthread_mutex_destroy(&ctrlr->ctrlr_lock);
    4372          47 :         if (rc) {
    4373           0 :                 SPDK_ERRLOG("could not destroy ctrlr_lock: %s\n", spdk_strerror(rc));
    4374           0 :                 assert(false);
    4375             :         }
    4376             : 
    4377          47 :         nvme_ctrlr_free_processes(ctrlr);
    4378          47 : }
    4379             : 
    4380             : void
    4381          47 : nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
    4382             :                           struct nvme_ctrlr_detach_ctx *ctx)
    4383             : {
    4384             :         struct spdk_nvme_qpair *qpair, *tmp;
    4385             : 
    4386          47 :         NVME_CTRLR_DEBUGLOG(ctrlr, "Prepare to destruct SSD\n");
    4387             : 
    4388          47 :         ctrlr->prepare_for_reset = false;
    4389          47 :         ctrlr->is_destructed = true;
    4390             : 
    4391          47 :         spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4392             : 
    4393          47 :         nvme_ctrlr_abort_queued_aborts(ctrlr);
    4394          47 :         nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
    4395             : 
    4396          47 :         TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
    4397           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    4398             :         }
    4399             : 
    4400          47 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    4401          47 :         nvme_ctrlr_free_iocs_specific_data(ctrlr);
    4402             : 
    4403          47 :         nvme_ctrlr_shutdown_async(ctrlr, ctx);
    4404          47 : }
    4405             : 
    4406             : int
    4407          86 : nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
    4408             :                                struct nvme_ctrlr_detach_ctx *ctx)
    4409             : {
    4410             :         struct spdk_nvme_ns *ns, *tmp_ns;
    4411          86 :         int rc = 0;
    4412             : 
    4413          86 :         if (!ctx->shutdown_complete) {
    4414          78 :                 rc = nvme_ctrlr_shutdown_poll_async(ctrlr, ctx);
    4415          78 :                 if (rc == -EAGAIN) {
    4416          39 :                         return -EAGAIN;
    4417             :                 }
    4418             :                 /* Destruct ctrlr forcefully for any other error. */
    4419             :         }
    4420             : 
    4421          47 :         if (ctx->cb_fn) {
    4422           0 :                 ctx->cb_fn(ctrlr);
    4423             :         }
    4424             : 
    4425          47 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    4426             : 
    4427        7733 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    4428        7686 :                 nvme_ctrlr_destruct_namespace(ctrlr, ns->id);
    4429        7686 :                 RB_REMOVE(nvme_ns_tree, &ctrlr->ns, ns);
    4430        7686 :                 spdk_free(ns);
    4431             :         }
    4432             : 
    4433          47 :         ctrlr->active_ns_count = 0;
    4434             : 
    4435          47 :         spdk_bit_array_free(&ctrlr->free_io_qids);
    4436             : 
    4437          47 :         free(ctrlr->ana_log_page);
    4438          47 :         free(ctrlr->copied_ana_desc);
    4439          47 :         ctrlr->ana_log_page = NULL;
    4440          47 :         ctrlr->copied_ana_desc = NULL;
    4441          47 :         ctrlr->ana_log_page_size = 0;
    4442             : 
    4443          47 :         nvme_transport_ctrlr_destruct(ctrlr);
    4444             : 
    4445          47 :         return rc;
    4446             : }
    4447             : 
    4448             : void
    4449          47 : nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
    4450             : {
    4451          47 :         struct nvme_ctrlr_detach_ctx ctx = { .ctrlr = ctrlr };
    4452             :         int rc;
    4453             : 
    4454          47 :         nvme_ctrlr_destruct_async(ctrlr, &ctx);
    4455             : 
    4456             :         while (1) {
    4457          86 :                 rc = nvme_ctrlr_destruct_poll_async(ctrlr, &ctx);
    4458          86 :                 if (rc != -EAGAIN) {
    4459          47 :                         break;
    4460             :                 }
    4461          39 :                 nvme_delay(1000);
    4462             :         }
    4463          47 : }
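nvme_ctrlr_destruct() is simply the blocking wrapper around the async/poll pair above. The public detach API exposes the same split; a minimal sketch using spdk/nvme.h, assuming the caller owns the controller:

#include "spdk/nvme.h"

static void
detach_without_blocking(struct spdk_nvme_ctrlr *ctrlr)
{
        struct spdk_nvme_detach_ctx *ctx = NULL;

        if (spdk_nvme_detach_async(ctrlr, &ctx) != 0) {
                return;  /* detach could not be started */
        }
        while (spdk_nvme_detach_poll_async(ctx) == -EAGAIN) {
                /* do useful work between polls instead of sleeping */
        }
}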
    4464             : 
    4465             : int
    4466          24 : nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
    4467             :                                 struct nvme_request *req)
    4468             : {
    4469          24 :         return nvme_qpair_submit_request(ctrlr->adminq, req);
    4470             : }
    4471             : 
    4472             : static void
    4473           0 : nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
    4474             : {
    4475             :         /* Do nothing */
    4476           0 : }
    4477             : 
    4478             : /*
    4479             :  * Check if we need to send a Keep Alive command.
    4480             :  * Caller must hold ctrlr->ctrlr_lock.
    4481             :  */
    4482             : static int
    4483           0 : nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
    4484             : {
    4485             :         uint64_t now;
    4486             :         struct nvme_request *req;
    4487             :         struct spdk_nvme_cmd *cmd;
    4488           0 :         int rc = 0;
    4489             : 
    4490           0 :         now = spdk_get_ticks();
    4491           0 :         if (now < ctrlr->next_keep_alive_tick) {
    4492           0 :                 return rc;
    4493             :         }
    4494             : 
    4495           0 :         req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
    4496           0 :         if (req == NULL) {
    4497           0 :                 return rc;
    4498             :         }
    4499             : 
    4500           0 :         cmd = &req->cmd;
    4501           0 :         cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;
    4502             : 
    4503           0 :         rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
    4504           0 :         if (rc != 0) {
    4505           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Submitting Keep Alive failed\n");
    4506           0 :                 rc = -ENXIO;
    4507             :         }
    4508             : 
    4509           0 :         ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
    4510           0 :         return rc;
    4511             : }
    4512             : 
    4513             : int32_t
    4514           1 : spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
    4515             : {
    4516             :         int32_t num_completions;
    4517             :         int32_t rc;
    4518             :         struct spdk_nvme_ctrlr_process  *active_proc;
    4519             : 
    4520           1 :         nvme_ctrlr_lock(ctrlr);
    4521             : 
    4522           1 :         if (ctrlr->keep_alive_interval_ticks) {
    4523           0 :                 rc = nvme_ctrlr_keep_alive(ctrlr);
    4524           0 :                 if (rc) {
    4525           0 :                         nvme_ctrlr_unlock(ctrlr);
    4526           0 :                         return rc;
    4527             :                 }
    4528             :         }
    4529             : 
    4530           1 :         rc = nvme_io_msg_process(ctrlr);
    4531           1 :         if (rc < 0) {
    4532           0 :                 nvme_ctrlr_unlock(ctrlr);
    4533           0 :                 return rc;
    4534             :         }
    4535           1 :         num_completions = rc;
    4536             : 
    4537           1 :         rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4538             : 
     4539             :         /* Each process has an async event list; complete the ones queued for this process */
    4540           1 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4541           1 :         if (active_proc) {
    4542           0 :                 nvme_ctrlr_complete_queued_async_events(ctrlr);
    4543             :         }
    4544             : 
    4545           1 :         if (rc == -ENXIO && ctrlr->is_disconnecting) {
    4546           1 :                 nvme_ctrlr_disconnect_done(ctrlr);
    4547             :         }
    4548             : 
    4549           1 :         nvme_ctrlr_unlock(ctrlr);
    4550             : 
    4551           1 :         if (rc < 0) {
    4552           1 :                 num_completions = rc;
    4553             :         } else {
    4554           0 :                 num_completions += rc;
    4555             :         }
    4556             : 
    4557           1 :         return num_completions;
    4558             : }
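Applications are expected to call this periodically on the thread that owns the controller so keep-alives and queued async events get serviced. A minimal poller sketch, assuming the spdk/thread.h poller API (the registration interval below is just an example value):

#include "spdk/nvme.h"
#include "spdk/thread.h"

static int
admin_poll(void *arg)
{
        struct spdk_nvme_ctrlr *ctrlr = arg;

        /* A negative return means the admin qpair failed; a real application
         * would start its reset/reconnect handling here. */
        return spdk_nvme_ctrlr_process_admin_completions(ctrlr) > 0 ?
               SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

/* e.g. spdk_poller_register(admin_poll, ctrlr, 100 * 1000);  // every 100 ms */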
    4559             : 
    4560             : const struct spdk_nvme_ctrlr_data *
    4561           0 : spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
    4562             : {
    4563           0 :         return &ctrlr->cdata;
    4564             : }
    4565             : 
    4566           0 : union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
    4567             : {
    4568           0 :         union spdk_nvme_csts_register csts;
    4569             : 
    4570           0 :         if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
    4571           0 :                 csts.raw = SPDK_NVME_INVALID_REGISTER_VALUE;
    4572             :         }
    4573           0 :         return csts;
    4574             : }
    4575             : 
    4576           0 : union spdk_nvme_cc_register spdk_nvme_ctrlr_get_regs_cc(struct spdk_nvme_ctrlr *ctrlr)
    4577             : {
    4578           0 :         union spdk_nvme_cc_register cc;
    4579             : 
    4580           0 :         if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
    4581           0 :                 cc.raw = SPDK_NVME_INVALID_REGISTER_VALUE;
    4582             :         }
    4583           0 :         return cc;
    4584             : }
    4585             : 
    4586           0 : union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
    4587             : {
    4588           0 :         return ctrlr->cap;
    4589             : }
    4590             : 
    4591           0 : union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
    4592             : {
    4593           0 :         return ctrlr->vs;
    4594             : }
    4595             : 
    4596           0 : union spdk_nvme_cmbsz_register spdk_nvme_ctrlr_get_regs_cmbsz(struct spdk_nvme_ctrlr *ctrlr)
    4597             : {
    4598           0 :         union spdk_nvme_cmbsz_register cmbsz;
    4599             : 
    4600           0 :         if (nvme_ctrlr_get_cmbsz(ctrlr, &cmbsz)) {
    4601           0 :                 cmbsz.raw = 0;
    4602             :         }
    4603             : 
    4604           0 :         return cmbsz;
    4605             : }
    4606             : 
    4607           0 : union spdk_nvme_pmrcap_register spdk_nvme_ctrlr_get_regs_pmrcap(struct spdk_nvme_ctrlr *ctrlr)
    4608             : {
    4609           0 :         union spdk_nvme_pmrcap_register pmrcap;
    4610             : 
    4611           0 :         if (nvme_ctrlr_get_pmrcap(ctrlr, &pmrcap)) {
    4612           0 :                 pmrcap.raw = 0;
    4613             :         }
    4614             : 
    4615           0 :         return pmrcap;
    4616             : }
    4617             : 
    4618           0 : union spdk_nvme_bpinfo_register spdk_nvme_ctrlr_get_regs_bpinfo(struct spdk_nvme_ctrlr *ctrlr)
    4619             : {
    4620           0 :         union spdk_nvme_bpinfo_register bpinfo;
    4621             : 
    4622           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    4623           0 :                 bpinfo.raw = 0;
    4624             :         }
    4625             : 
    4626           0 :         return bpinfo;
    4627             : }
    4628             : 
    4629             : uint64_t
    4630           0 : spdk_nvme_ctrlr_get_pmrsz(struct spdk_nvme_ctrlr *ctrlr)
    4631             : {
    4632           0 :         return ctrlr->pmr_size;
    4633             : }
    4634             : 
    4635             : uint32_t
    4636           2 : spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
    4637             : {
    4638           2 :         return ctrlr->cdata.nn;
    4639             : }
    4640             : 
    4641             : bool
    4642        9301 : spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4643             : {
    4644        9301 :         struct spdk_nvme_ns tmp, *ns;
    4645             : 
    4646        9301 :         tmp.id = nsid;
    4647        9301 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4648             : 
    4649        9301 :         if (ns != NULL) {
    4650        9209 :                 return ns->active;
    4651             :         }
    4652             : 
    4653          92 :         return false;
    4654             : }
    4655             : 
    4656             : uint32_t
    4657          35 : spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    4658             : {
    4659             :         struct spdk_nvme_ns *ns;
    4660             : 
    4661          35 :         ns = RB_MIN(nvme_ns_tree, &ctrlr->ns);
    4662          35 :         if (ns == NULL) {
    4663          10 :                 return 0;
    4664             :         }
    4665             : 
    4666        4618 :         while (ns != NULL) {
    4667        4615 :                 if (ns->active) {
    4668          22 :                         return ns->id;
    4669             :                 }
    4670             : 
    4671        4593 :                 ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4672             :         }
    4673             : 
    4674           3 :         return 0;
    4675             : }
    4676             : 
    4677             : uint32_t
    4678        4657 : spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
    4679             : {
    4680        4657 :         struct spdk_nvme_ns tmp, *ns;
    4681             : 
    4682        4657 :         tmp.id = prev_nsid;
    4683        4657 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4684        4657 :         if (ns == NULL) {
    4685           5 :                 return 0;
    4686             :         }
    4687             : 
    4688        4652 :         ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4689        6184 :         while (ns != NULL) {
    4690        6164 :                 if (ns->active) {
    4691        4632 :                         return ns->id;
    4692             :                 }
    4693             : 
    4694        1532 :                 ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4695             :         }
    4696             : 
    4697          20 :         return 0;
    4698             : }
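Together, these two helpers support the standard SPDK iteration idiom over active namespaces:

#include "spdk/nvme.h"

static void
for_each_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
        uint32_t nsid;

        for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0;
             nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
                struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);

                /* ... inspect ns, e.g. spdk_nvme_ns_get_size(ns) ... */
                (void)ns;
        }
}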
    4699             : 
    4700             : struct spdk_nvme_ns *
    4701       12403 : spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4702             : {
    4703       12403 :         struct spdk_nvme_ns tmp;
    4704             :         struct spdk_nvme_ns *ns;
    4705             : 
    4706       12403 :         if (nsid < 1 || nsid > ctrlr->cdata.nn) {
    4707          18 :                 return NULL;
    4708             :         }
    4709             : 
    4710       12385 :         nvme_ctrlr_lock(ctrlr);
    4711             : 
    4712       12385 :         tmp.id = nsid;
    4713       12385 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4714             : 
    4715       12385 :         if (ns == NULL) {
    4716        7687 :                 ns = spdk_zmalloc(sizeof(struct spdk_nvme_ns), 64, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
    4717        7687 :                 if (ns == NULL) {
    4718           0 :                         nvme_ctrlr_unlock(ctrlr);
    4719           0 :                         return NULL;
    4720             :                 }
    4721             : 
    4722        7687 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Namespace %u was added\n", nsid);
    4723        7687 :                 ns->id = nsid;
    4724        7687 :                 RB_INSERT(nvme_ns_tree, &ctrlr->ns, ns);
    4725             :         }
    4726             : 
    4727       12385 :         nvme_ctrlr_unlock(ctrlr);
    4728             : 
    4729       12385 :         return ns;
    4730             : }
    4731             : 
    4732             : struct spdk_pci_device *
    4733           0 : spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr)
    4734             : {
    4735           0 :         if (ctrlr == NULL) {
    4736           0 :                 return NULL;
    4737             :         }
    4738             : 
    4739           0 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    4740           0 :                 return NULL;
    4741             :         }
    4742             : 
    4743           0 :         return nvme_ctrlr_proc_get_devhandle(ctrlr);
    4744             : }
    4745             : 
    4746             : int32_t
    4747           3 : spdk_nvme_ctrlr_get_numa_id(struct spdk_nvme_ctrlr *ctrlr)
    4748             : {
    4749           3 :         if (ctrlr->numa.id_valid) {
    4750           2 :                 return ctrlr->numa.id;
    4751             :         } else {
    4752           1 :                 return SPDK_ENV_NUMA_ID_ANY;
    4753             :         }
    4754             : }
    4755             : 
    4756             : uint32_t
    4757           0 : spdk_nvme_ctrlr_get_max_xfer_size(const struct spdk_nvme_ctrlr *ctrlr)
    4758             : {
    4759           0 :         return ctrlr->max_xfer_size;
    4760             : }
    4761             : 
    4762             : uint16_t
    4763           0 : spdk_nvme_ctrlr_get_max_sges(const struct spdk_nvme_ctrlr *ctrlr)
    4764             : {
    4765           0 :         if (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
    4766           0 :                 return ctrlr->max_sges;
    4767             :         } else {
    4768           0 :                 return UINT16_MAX;
    4769             :         }
    4770             : }
    4771             : 
    4772             : void
    4773           2 : spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
    4774             :                                       spdk_nvme_aer_cb aer_cb_fn,
    4775             :                                       void *aer_cb_arg)
    4776             : {
    4777             :         struct spdk_nvme_ctrlr_process *active_proc;
    4778             : 
    4779           2 :         nvme_ctrlr_lock(ctrlr);
    4780             : 
    4781           2 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4782           2 :         if (active_proc) {
    4783           2 :                 active_proc->aer_cb_fn = aer_cb_fn;
    4784           2 :                 active_proc->aer_cb_arg = aer_cb_arg;
    4785             :         }
    4786             : 
    4787           2 :         nvme_ctrlr_unlock(ctrlr);
    4788           2 : }
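A sketch of a matching AER handler; the spdk_nvme_aer_cb signature and the completion layout are public (spdk/nvme.h and spdk/nvme_spec.h), but the handler body here is illustrative:

#include "spdk/nvme.h"

static void
aer_handler(void *arg, const struct spdk_nvme_cpl *cpl)
{
        union spdk_nvme_async_event_completion event;

        if (spdk_nvme_cpl_is_error(cpl)) {
                return;  /* AER was aborted, e.g. during reset or detach */
        }

        event.raw = cpl->cdw0;
        if (event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) {
                /* e.g. namespace attribute change: rescan namespaces */
        }
}

/* Registered via spdk_nvme_ctrlr_register_aer_callback(ctrlr, aer_handler, app_ctx); */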
    4789             : 
    4790             : void
    4791           0 : spdk_nvme_ctrlr_disable_read_changed_ns_list_log_page(struct spdk_nvme_ctrlr *ctrlr)
    4792             : {
    4793           0 :         ctrlr->opts.disable_read_changed_ns_list_log_page = true;
    4794           0 : }
    4795             : 
    4796             : void
    4797           0 : spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
    4798             :                 uint64_t timeout_io_us, uint64_t timeout_admin_us,
    4799             :                 spdk_nvme_timeout_cb cb_fn, void *cb_arg)
    4800             : {
    4801             :         struct spdk_nvme_ctrlr_process  *active_proc;
    4802             : 
    4803           0 :         nvme_ctrlr_lock(ctrlr);
    4804             : 
    4805           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4806           0 :         if (active_proc) {
    4807           0 :                 active_proc->timeout_io_ticks = timeout_io_us * spdk_get_ticks_hz() / 1000000ULL;
    4808           0 :                 active_proc->timeout_admin_ticks = timeout_admin_us * spdk_get_ticks_hz() / 1000000ULL;
    4809           0 :                 active_proc->timeout_cb_fn = cb_fn;
    4810           0 :                 active_proc->timeout_cb_arg = cb_arg;
    4811             :         }
    4812             : 
    4813           0 :         ctrlr->timeout_enabled = true;
    4814             : 
    4815           0 :         nvme_ctrlr_unlock(ctrlr);
    4816           0 : }
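The microseconds-to-ticks conversion above is the standard SPDK idiom. As a worked example (the helper name is illustrative): with a 2.4 GHz tick source, spdk_get_ticks_hz() returns 2400000000, so a 30 ms I/O timeout (timeout_io_us = 30000) becomes 30000 * 2400000000 / 1000000 = 72,000,000 ticks.

#include "spdk/env.h"

static uint64_t
us_to_ticks(uint64_t us)
{
        return us * spdk_get_ticks_hz() / 1000000ULL;
}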
    4817             : 
    4818             : bool
    4819           8 : spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
    4820             : {
    4821             :         /* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
    4822             :         SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
    4823           8 :         return ctrlr->log_page_supported[log_page];
    4824             : }
    4825             : 
    4826             : bool
    4827           4 : spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
    4828             : {
    4829             :         /* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
    4830             :         SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
    4831           4 :         return ctrlr->feature_supported[feature_code];
    4832             : }
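
A small sketch gating optional work on the two capability queries above; the log page and feature constants are from SPDK's public NVMe headers, and the helper name is illustrative.

#include "spdk/nvme.h"

static bool
example_supports_health_monitoring(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_ctrlr_is_log_page_supported(ctrlr,
			SPDK_NVME_LOG_HEALTH_INFORMATION) &&
	       spdk_nvme_ctrlr_is_feature_supported(ctrlr,
			SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD);
}
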
    4833             : 
    4834             : int
    4835           1 : spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    4836             :                           struct spdk_nvme_ctrlr_list *payload)
    4837             : {
    4838             :         struct nvme_completion_poll_status      *status;
    4839             :         struct spdk_nvme_ns                     *ns;
    4840             :         int                                     res;
    4841             : 
    4842           1 :         if (nsid == 0) {
    4843           0 :                 return -EINVAL;
    4844             :         }
    4845             : 
    4846           1 :         status = calloc(1, sizeof(*status));
    4847           1 :         if (!status) {
    4848           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4849           0 :                 return -ENOMEM;
    4850             :         }
    4851             : 
    4852           1 :         res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
    4853             :                                        nvme_completion_poll_cb, status);
    4854           1 :         if (res) {
    4855           0 :                 free(status);
    4856           0 :                 return res;
    4857             :         }
    4858           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4859           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_attach_ns failed!\n");
    4860           0 :                 if (!status->timed_out) {
    4861           0 :                         free(status);
    4862             :                 }
    4863           0 :                 return -ENXIO;
    4864             :         }
    4865           1 :         free(status);
    4866             : 
    4867           1 :         res = nvme_ctrlr_identify_active_ns(ctrlr);
    4868           1 :         if (res) {
    4869           0 :                 return res;
    4870             :         }
    4871             : 
    4872           1 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    4873           1 :         if (ns == NULL) {
    4874           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_get_ns failed!\n");
    4875           0 :                 return -ENXIO;
    4876             :         }
    4877             : 
    4878           1 :         return nvme_ns_construct(ns, nsid, ctrlr);
    4879             : }
    4880             : 
    4881             : int
    4882           1 : spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    4883             :                           struct spdk_nvme_ctrlr_list *payload)
    4884             : {
    4885             :         struct nvme_completion_poll_status      *status;
    4886             :         int                                     res;
    4887             : 
    4888           1 :         if (nsid == 0) {
    4889           0 :                 return -EINVAL;
    4890             :         }
    4891             : 
    4892           1 :         status = calloc(1, sizeof(*status));
    4893           1 :         if (!status) {
    4894           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4895           0 :                 return -ENOMEM;
    4896             :         }
    4897             : 
    4898           1 :         res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
    4899             :                                        nvme_completion_poll_cb, status);
    4900           1 :         if (res) {
    4901           0 :                 free(status);
    4902           0 :                 return res;
    4903             :         }
    4904           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4905           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_detach_ns failed!\n");
    4906           0 :                 if (!status->timed_out) {
    4907           0 :                         free(status);
    4908             :                 }
    4909           0 :                 return -ENXIO;
    4910             :         }
    4911           1 :         free(status);
    4912             : 
    4913           1 :         return nvme_ctrlr_identify_active_ns(ctrlr);
    4914             : }
    4915             : 
    4916             : uint32_t
    4917           1 : spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
    4918             : {
    4919             :         struct nvme_completion_poll_status      *status;
    4920             :         int                                     res;
    4921             :         uint32_t                                nsid;
    4922             : 
    4923           1 :         status = calloc(1, sizeof(*status));
    4924           1 :         if (!status) {
    4925           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4926           0 :                 return 0;
    4927             :         }
    4928             : 
    4929           1 :         res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, status);
    4930           1 :         if (res) {
    4931           0 :                 free(status);
    4932           0 :                 return 0;
    4933             :         }
    4934           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4935           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_create_ns failed!\n");
    4936           0 :                 if (!status->timed_out) {
    4937           0 :                         free(status);
    4938             :                 }
    4939           0 :                 return 0;
    4940             :         }
    4941             : 
    4942           1 :         nsid = status->cpl.cdw0;
    4943           1 :         free(status);
    4944             : 
    4945           1 :         assert(nsid > 0);
    4946             : 
    4947             :         /* Return the namespace ID that was created */
    4948           1 :         return nsid;
    4949             : }
    4950             : 
    4951             : int
    4952           1 : spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4953             : {
    4954             :         struct nvme_completion_poll_status      *status;
    4955             :         int                                     res;
    4956             : 
    4957           1 :         if (nsid == 0) {
    4958           0 :                 return -EINVAL;
    4959             :         }
    4960             : 
    4961           1 :         status = calloc(1, sizeof(*status));
    4962           1 :         if (!status) {
    4963           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4964           0 :                 return -ENOMEM;
    4965             :         }
    4966             : 
    4967           1 :         res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, status);
    4968           1 :         if (res) {
    4969           0 :                 free(status);
    4970           0 :                 return res;
    4971             :         }
    4972           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4973           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_delete_ns failed!\n");
    4974           0 :                 if (!status->timed_out) {
    4975           0 :                         free(status);
    4976             :                 }
    4977           0 :                 return -ENXIO;
    4978             :         }
    4979           1 :         free(status);
    4980             : 
    4981           1 :         return nvme_ctrlr_identify_active_ns(ctrlr);
    4982             : }
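
The four namespace-management calls above compose into a create/attach/detach/delete round trip. A sketch with illustrative sizes; real code would derive nsze/ncap from the intended capacity and chosen LBA format.

#include "spdk/nvme.h"

static int
example_ns_round_trip(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_ns_data nsdata = {};
	struct spdk_nvme_ctrlr_list ctrlr_list = {};
	uint32_t nsid;
	int rc;

	nsdata.nsze = 1024 * 1024;	/* size, in logical blocks */
	nsdata.ncap = 1024 * 1024;	/* capacity, in logical blocks */

	nsid = spdk_nvme_ctrlr_create_ns(ctrlr, &nsdata);
	if (nsid == 0) {
		return -1;	/* create_ns reports failure as nsid 0 */
	}

	/* Attach the new namespace to this controller only. */
	ctrlr_list.ctrlr_count = 1;
	ctrlr_list.ctrlr_list[0] = spdk_nvme_ctrlr_get_data(ctrlr)->cntlid;
	rc = spdk_nvme_ctrlr_attach_ns(ctrlr, nsid, &ctrlr_list);
	if (rc != 0) {
		spdk_nvme_ctrlr_delete_ns(ctrlr, nsid);
		return rc;
	}

	/* ... I/O against the namespace would happen here ... */

	rc = spdk_nvme_ctrlr_detach_ns(ctrlr, nsid, &ctrlr_list);
	if (rc == 0) {
		rc = spdk_nvme_ctrlr_delete_ns(ctrlr, nsid);
	}
	return rc;
}
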
    4983             : 
    4984             : int
    4985           0 : spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    4986             :                        struct spdk_nvme_format *format)
    4987             : {
    4988             :         struct nvme_completion_poll_status      *status;
    4989             :         int                                     res;
    4990             : 
    4991           0 :         status = calloc(1, sizeof(*status));
    4992           0 :         if (!status) {
    4993           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4994           0 :                 return -ENOMEM;
    4995             :         }
    4996             : 
    4997           0 :         res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
    4998             :                                     status);
    4999           0 :         if (res) {
    5000           0 :                 free(status);
    5001           0 :                 return res;
    5002             :         }
    5003           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5004           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_format failed!\n");
    5005           0 :                 if (!status->timed_out) {
    5006           0 :                         free(status);
    5007             :                 }
    5008           0 :                 return -ENXIO;
    5009             :         }
    5010           0 :         free(status);
    5011             : 
    5012           0 :         return spdk_nvme_ctrlr_reset(ctrlr);
    5013             : }
    5014             : 
    5015             : int
    5016           8 : spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
    5017             :                                 int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status)
    5018             : {
    5019           8 :         struct spdk_nvme_fw_commit              fw_commit;
    5020             :         struct nvme_completion_poll_status      *status;
    5021             :         int                                     res;
    5022             :         unsigned int                            size_remaining;
    5023             :         unsigned int                            offset;
    5024             :         unsigned int                            transfer;
    5025             :         uint8_t                                 *p;
    5026             : 
    5027           8 :         if (!completion_status) {
    5028           0 :                 return -EINVAL;
    5029             :         }
    5030           8 :         memset(completion_status, 0, sizeof(struct spdk_nvme_status));
    5031           8 :         if (size % 4) {
    5032           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_update_firmware invalid size!\n");
    5033           1 :                 return -1;
    5034             :         }
    5035             : 
    5036             :         /* Currently, only SPDK_NVME_FW_COMMIT_REPLACE_IMG and
    5037             :          * SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG are supported.
    5038             :          */
    5039           7 :         if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) &&
    5040             :             (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) {
    5041           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_update_firmware invalid command!\n");
    5042           0 :                 return -1;
    5043             :         }
    5044             : 
    5045           7 :         status = calloc(1, sizeof(*status));
    5046           7 :         if (!status) {
    5047           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5048           0 :                 return -ENOMEM;
    5049             :         }
    5050             : 
    5051             :         /* Firmware download */
    5052           7 :         size_remaining = size;
    5053           7 :         offset = 0;
    5054           7 :         p = payload;
    5055             : 
    5056          10 :         while (size_remaining > 0) {
    5057           7 :                 transfer = spdk_min(size_remaining, ctrlr->min_page_size);
    5058             : 
    5059           7 :                 memset(status, 0, sizeof(*status));
    5060           7 :                 res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
    5061             :                                                        nvme_completion_poll_cb,
    5062             :                                                        status);
    5063           7 :                 if (res) {
    5064           2 :                         free(status);
    5065           2 :                         return res;
    5066             :                 }
    5067             : 
    5068           5 :                 if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5069           2 :                         NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_fw_image_download failed!\n");
    5070           2 :                         if (!status->timed_out) {
    5071           1 :                                 free(status);
    5072             :                         }
    5073           2 :                         return -ENXIO;
    5074             :                 }
    5075           3 :                 p += transfer;
    5076           3 :                 offset += transfer;
    5077           3 :                 size_remaining -= transfer;
    5078             :         }
    5079             : 
    5080             :         /* Firmware commit */
    5081           3 :         memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    5082           3 :         fw_commit.fs = slot;
    5083           3 :         fw_commit.ca = commit_action;
    5084             : 
    5085           3 :         memset(status, 0, sizeof(*status));
    5086           3 :         res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
    5087             :                                        status);
    5088           3 :         if (res) {
    5089           1 :                 free(status);
    5090           1 :                 return res;
    5091             :         }
    5092             : 
    5093           2 :         res = nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock);
    5094             : 
    5095           2 :         memcpy(completion_status, &status->cpl.status, sizeof(struct spdk_nvme_status));
    5096             : 
    5097           2 :         if (!status->timed_out) {
    5098           2 :                 free(status);
    5099             :         }
    5100             : 
    5101           2 :         if (res) {
    5102           1 :                 if (completion_status->sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
    5103           0 :                     completion_status->sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
    5104           1 :                         if (completion_status->sct == SPDK_NVME_SCT_COMMAND_SPECIFIC  &&
    5105           0 :                             completion_status->sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
    5106           0 :                                 NVME_CTRLR_NOTICELOG(ctrlr,
    5107             :                                                      "firmware activation requires conventional reset to be performed. !\n");
    5108             :                         } else {
    5109           1 :                                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    5110             :                         }
    5111           1 :                         return -ENXIO;
    5112             :                 }
    5113             :         }
    5114             : 
    5115           1 :         return spdk_nvme_ctrlr_reset(ctrlr);
    5116             : }
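
A sketch of driving the firmware update above. The image buffer is assumed to be DMA-safe (e.g. from spdk_dma_zmalloc()) and a multiple of four bytes, matching the size check at the top of the function; on success, the controller reset has already been performed internally.

#include "spdk/nvme.h"

static int
example_fw_update(struct spdk_nvme_ctrlr *ctrlr, void *image, uint32_t size)
{
	struct spdk_nvme_status status;
	int rc;

	rc = spdk_nvme_ctrlr_update_firmware(ctrlr, image, size,
			0 /* slot 0: controller picks the slot */,
			SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG, &status);
	if (rc != 0) {
		fprintf(stderr, "firmware update failed: sct=%d sc=%d\n",
			status.sct, status.sc);
	}
	return rc;
}
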
    5117             : 
    5118             : int
    5119           0 : spdk_nvme_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
    5120             : {
    5121             :         int rc, size;
    5122             :         union spdk_nvme_cmbsz_register cmbsz;
    5123             : 
    5124           0 :         cmbsz = spdk_nvme_ctrlr_get_regs_cmbsz(ctrlr);
    5125             : 
    5126           0 :         if (cmbsz.bits.rds == 0 || cmbsz.bits.wds == 0) {
    5127           0 :                 return -ENOTSUP;
    5128             :         }
    5129             : 
    5130           0 :         size = cmbsz.bits.sz * (0x1000 << (cmbsz.bits.szu * 4));
    5131             : 
    5132           0 :         nvme_ctrlr_lock(ctrlr);
    5133           0 :         rc = nvme_transport_ctrlr_reserve_cmb(ctrlr);
    5134           0 :         nvme_ctrlr_unlock(ctrlr);
    5135             : 
    5136           0 :         if (rc < 0) {
    5137           0 :                 return rc;
    5138             :         }
    5139             : 
    5140           0 :         return size;
    5141             : }
    5142             : 
    5143             : void *
    5144           0 : spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
    5145             : {
    5146             :         void *buf;
    5147             : 
    5148           0 :         nvme_ctrlr_lock(ctrlr);
    5149           0 :         buf = nvme_transport_ctrlr_map_cmb(ctrlr, size);
    5150           0 :         nvme_ctrlr_unlock(ctrlr);
    5151             : 
    5152           0 :         return buf;
    5153             : }
    5154             : 
    5155             : void
    5156           0 : spdk_nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
    5157             : {
    5158           0 :         nvme_ctrlr_lock(ctrlr);
    5159           0 :         nvme_transport_ctrlr_unmap_cmb(ctrlr);
    5160           0 :         nvme_ctrlr_unlock(ctrlr);
    5161           0 : }
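
A sketch tying the three CMB calls above together: reserve, map, use, unmap. Controllers without both RDS and WDS support make the reserve fail with -ENOTSUP, so the fallback path matters in practice.

#include "spdk/nvme.h"

static void
example_use_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	size_t size;
	void *cmb;

	if (spdk_nvme_ctrlr_reserve_cmb(ctrlr) < 0) {
		return;	/* no usable controller memory buffer */
	}

	cmb = spdk_nvme_ctrlr_map_cmb(ctrlr, &size);
	if (cmb != NULL) {
		/* ... place I/O data buffers inside [cmb, cmb + size) ... */
		spdk_nvme_ctrlr_unmap_cmb(ctrlr);
	}
}
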
    5162             : 
    5163             : int
    5164           0 : spdk_nvme_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
    5165             : {
    5166             :         int rc;
    5167             : 
    5168           0 :         nvme_ctrlr_lock(ctrlr);
    5169           0 :         rc = nvme_transport_ctrlr_enable_pmr(ctrlr);
    5170           0 :         nvme_ctrlr_unlock(ctrlr);
    5171             : 
    5172           0 :         return rc;
    5173             : }
    5174             : 
    5175             : int
    5176           0 : spdk_nvme_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
    5177             : {
    5178             :         int rc;
    5179             : 
    5180           0 :         nvme_ctrlr_lock(ctrlr);
    5181           0 :         rc = nvme_transport_ctrlr_disable_pmr(ctrlr);
    5182           0 :         nvme_ctrlr_unlock(ctrlr);
    5183             : 
    5184           0 :         return rc;
    5185             : }
    5186             : 
    5187             : void *
    5188           0 : spdk_nvme_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
    5189             : {
    5190             :         void *buf;
    5191             : 
    5192           0 :         nvme_ctrlr_lock(ctrlr);
    5193           0 :         buf = nvme_transport_ctrlr_map_pmr(ctrlr, size);
    5194           0 :         nvme_ctrlr_unlock(ctrlr);
    5195             : 
    5196           0 :         return buf;
    5197             : }
    5198             : 
    5199             : int
    5200           0 : spdk_nvme_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
    5201             : {
    5202             :         int rc;
    5203             : 
    5204           0 :         nvme_ctrlr_lock(ctrlr);
    5205           0 :         rc = nvme_transport_ctrlr_unmap_pmr(ctrlr);
    5206           0 :         nvme_ctrlr_unlock(ctrlr);
    5207             : 
    5208           0 :         return rc;
    5209             : }
    5210             : 
    5211             : int
    5212           0 : spdk_nvme_ctrlr_read_boot_partition_start(struct spdk_nvme_ctrlr *ctrlr, void *payload,
    5213             :                 uint32_t bprsz, uint32_t bprof, uint32_t bpid)
    5214             : {
    5215           0 :         union spdk_nvme_bprsel_register bprsel;
    5216           0 :         union spdk_nvme_bpinfo_register bpinfo;
    5217           0 :         uint64_t bpmbl, bpmb_size;
    5218             : 
    5219           0 :         if (ctrlr->cap.bits.bps == 0) {
    5220           0 :                 return -ENOTSUP;
    5221             :         }
    5222             : 
    5223           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    5224           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "get bpinfo failed\n");
    5225           0 :                 return -EIO;
    5226             :         }
    5227             : 
    5228           0 :         if (bpinfo.bits.brs == SPDK_NVME_BRS_READ_IN_PROGRESS) {
    5229           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition read already initiated\n");
    5230           0 :                 return -EALREADY;
    5231             :         }
    5232             : 
    5233           0 :         nvme_ctrlr_lock(ctrlr);
    5234             : 
    5235           0 :         bpmb_size = bprsz * 4096;
    5236           0 :         bpmbl = spdk_vtophys(payload, &bpmb_size);
    5237           0 :         if (bpmbl == SPDK_VTOPHYS_ERROR) {
    5238           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_vtophys of bpmbl failed\n");
    5239           0 :                 nvme_ctrlr_unlock(ctrlr);
    5240           0 :                 return -EFAULT;
    5241             :         }
    5242             : 
    5243           0 :         if (bpmb_size != bprsz * 4096) {
    5244           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition buffer is not physically contiguous\n");
    5245           0 :                 nvme_ctrlr_unlock(ctrlr);
    5246           0 :                 return -EFAULT;
    5247             :         }
    5248             : 
    5249           0 :         if (nvme_ctrlr_set_bpmbl(ctrlr, bpmbl)) {
    5250           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_bpmbl() failed\n");
    5251           0 :                 nvme_ctrlr_unlock(ctrlr);
    5252           0 :                 return -EIO;
    5253             :         }
    5254             : 
    5255           0 :         bprsel.bits.bpid = bpid;
    5256           0 :         bprsel.bits.bprof = bprof;
    5257           0 :         bprsel.bits.bprsz = bprsz;
    5258             : 
    5259           0 :         if (nvme_ctrlr_set_bprsel(ctrlr, &bprsel)) {
    5260           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_bprsel() failed\n");
    5261           0 :                 nvme_ctrlr_unlock(ctrlr);
    5262           0 :                 return -EIO;
    5263             :         }
    5264             : 
    5265           0 :         nvme_ctrlr_unlock(ctrlr);
    5266           0 :         return 0;
    5267             : }
    5268             : 
    5269             : int
    5270           0 : spdk_nvme_ctrlr_read_boot_partition_poll(struct spdk_nvme_ctrlr *ctrlr)
    5271             : {
    5272           0 :         int rc = 0;
    5273           0 :         union spdk_nvme_bpinfo_register bpinfo;
    5274             : 
    5275           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    5276           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "get bpinfo failed\n");
    5277           0 :                 return -EIO;
    5278             :         }
    5279             : 
    5280           0 :         switch (bpinfo.bits.brs) {
    5281           0 :         case SPDK_NVME_BRS_NO_READ:
    5282           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition read not initiated\n");
    5283           0 :                 rc = -EINVAL;
    5284           0 :                 break;
    5285           0 :         case SPDK_NVME_BRS_READ_IN_PROGRESS:
    5286           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition read in progress\n");
    5287           0 :                 rc = -EAGAIN;
    5288           0 :                 break;
    5289           0 :         case SPDK_NVME_BRS_READ_ERROR:
    5290           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Error completing Boot Partition read\n");
    5291           0 :                 rc = -EIO;
    5292           0 :                 break;
    5293           0 :         case SPDK_NVME_BRS_READ_SUCCESS:
    5294           0 :                 NVME_CTRLR_INFOLOG(ctrlr, "Boot Partition read completed successfully\n");
    5295           0 :                 break;
    5296           0 :         default:
    5297           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Invalid Boot Partition read status\n");
    5298           0 :                 rc = -EINVAL;
    5299             :         }
    5300             : 
    5301           0 :         return rc;
    5302             : }
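
A sketch of driving the start/poll pair above to completion. The payload must be physically contiguous, per the vtophys check in the start function; spdk_dma_zmalloc() is assumed to provide that for allocations within a single hugepage. bprsz is in 4 KiB units.

#include "spdk/env.h"
#include "spdk/nvme.h"

static int
example_read_boot_partition(struct spdk_nvme_ctrlr *ctrlr,
			    uint32_t bprsz /* 4 KiB units */, uint32_t bpid)
{
	void *buf = spdk_dma_zmalloc((size_t)bprsz * 4096, 0x1000, NULL);
	int rc;

	if (buf == NULL) {
		return -ENOMEM;
	}

	rc = spdk_nvme_ctrlr_read_boot_partition_start(ctrlr, buf, bprsz,
			0 /* bprof */, bpid);
	if (rc == 0) {
		/* Busy-wait for brevity; real code would pace the polling. */
		do {
			rc = spdk_nvme_ctrlr_read_boot_partition_poll(ctrlr);
		} while (rc == -EAGAIN);
	}

	/* ... consume buf here when rc == 0 ... */
	spdk_dma_free(buf);
	return rc;
}
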
    5303             : 
    5304             : static void
    5305           0 : nvme_write_boot_partition_cb(void *arg, const struct spdk_nvme_cpl *cpl)
    5306             : {
    5307             :         int res;
    5308           0 :         struct spdk_nvme_ctrlr *ctrlr = arg;
    5309           0 :         struct spdk_nvme_fw_commit fw_commit;
    5310           0 :         struct spdk_nvme_cpl err_cpl =
    5311             :         {.status = {.sct = SPDK_NVME_SCT_GENERIC, .sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR }};
    5312             : 
    5313           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    5314           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Write Boot Partition failed\n");
    5315           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, cpl);
    5316           0 :                 return;
    5317             :         }
    5318             : 
    5319           0 :         if (ctrlr->bp_ws == SPDK_NVME_BP_WS_DOWNLOADING) {
    5320           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Downloading at Offset %d Success\n", ctrlr->fw_offset);
    5321           0 :                 ctrlr->fw_payload = (uint8_t *)ctrlr->fw_payload + ctrlr->fw_transfer_size;
    5322           0 :                 ctrlr->fw_offset += ctrlr->fw_transfer_size;
    5323           0 :                 ctrlr->fw_size_remaining -= ctrlr->fw_transfer_size;
    5324           0 :                 ctrlr->fw_transfer_size = spdk_min(ctrlr->fw_size_remaining, ctrlr->min_page_size);
    5325           0 :                 res = nvme_ctrlr_cmd_fw_image_download(ctrlr, ctrlr->fw_transfer_size, ctrlr->fw_offset,
    5326             :                                                        ctrlr->fw_payload, nvme_write_boot_partition_cb, ctrlr);
    5327           0 :                 if (res) {
    5328           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_image_download failed!\n");
    5329           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5330           0 :                         return;
    5331             :                 }
    5332             : 
    5333           0 :                 if (ctrlr->fw_transfer_size < ctrlr->min_page_size) {
    5334           0 :                         ctrlr->bp_ws = SPDK_NVME_BP_WS_DOWNLOADED;
    5335             :                 }
    5336           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_DOWNLOADED) {
    5337           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Download Success\n");
    5338           0 :                 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    5339           0 :                 fw_commit.bpid = ctrlr->bpid;
    5340           0 :                 fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_BOOT_PARTITION;
    5341           0 :                 res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit,
    5342             :                                                nvme_write_boot_partition_cb, ctrlr);
    5343           0 :                 if (res) {
    5344           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    5345           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "commit action: %d\n", fw_commit.ca);
    5346           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5347           0 :                         return;
    5348             :                 }
    5349             : 
    5350           0 :                 ctrlr->bp_ws = SPDK_NVME_BP_WS_REPLACE;
    5351           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_REPLACE) {
    5352           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Replacement Success\n");
    5353           0 :                 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    5354           0 :                 fw_commit.bpid = ctrlr->bpid;
    5355           0 :                 fw_commit.ca = SPDK_NVME_FW_COMMIT_ACTIVATE_BOOT_PARTITION;
    5356           0 :                 res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit,
    5357             :                                                nvme_write_boot_partition_cb, ctrlr);
    5358           0 :                 if (res) {
    5359           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    5360           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "commit action: %d\n", fw_commit.ca);
    5361           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5362           0 :                         return;
    5363             :                 }
    5364             : 
    5365           0 :                 ctrlr->bp_ws = SPDK_NVME_BP_WS_ACTIVATE;
    5366           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_ACTIVATE) {
    5367           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Activation Success\n");
    5368           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, cpl);
    5369             :         } else {
    5370           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Invalid Boot Partition write state\n");
    5371           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5372           0 :                 return;
    5373             :         }
    5374             : }
    5375             : 
    5376             : int
    5377           0 : spdk_nvme_ctrlr_write_boot_partition(struct spdk_nvme_ctrlr *ctrlr,
    5378             :                                      void *payload, uint32_t size, uint32_t bpid,
    5379             :                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    5380             : {
    5381             :         int res;
    5382             : 
    5383           0 :         if (ctrlr->cap.bits.bps == 0) {
    5384           0 :                 return -ENOTSUP;
    5385             :         }
    5386             : 
    5387           0 :         ctrlr->bp_ws = SPDK_NVME_BP_WS_DOWNLOADING;
    5388           0 :         ctrlr->bpid = bpid;
    5389           0 :         ctrlr->bp_write_cb_fn = cb_fn;
    5390           0 :         ctrlr->bp_write_cb_arg = cb_arg;
    5391           0 :         ctrlr->fw_offset = 0;
    5392           0 :         ctrlr->fw_size_remaining = size;
    5393           0 :         ctrlr->fw_payload = payload;
    5394           0 :         ctrlr->fw_transfer_size = spdk_min(ctrlr->fw_size_remaining, ctrlr->min_page_size);
    5395             : 
    5396           0 :         res = nvme_ctrlr_cmd_fw_image_download(ctrlr, ctrlr->fw_transfer_size, ctrlr->fw_offset,
    5397             :                                                ctrlr->fw_payload, nvme_write_boot_partition_cb, ctrlr);
    5398             : 
    5399           0 :         return res;
    5400             : }
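
A sketch of the asynchronous write path above. The caller's callback only fires after the whole download/replace/activate chain in nvme_write_boot_partition_cb() has run, so the admin queue must be polled until then; the helper names and busy-wait are illustrative.

#include "spdk/nvme.h"

static void
example_bp_write_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	*(bool *)cb_arg = true;
	if (spdk_nvme_cpl_is_error(cpl)) {
		fprintf(stderr, "boot partition write failed\n");
	}
}

static int
example_write_boot_partition(struct spdk_nvme_ctrlr *ctrlr,
			     void *image, uint32_t size, uint32_t bpid)
{
	bool done = false;
	int rc;

	rc = spdk_nvme_ctrlr_write_boot_partition(ctrlr, image, size, bpid,
			example_bp_write_done, &done);
	if (rc != 0) {
		return rc;
	}
	while (!done) {
		spdk_nvme_ctrlr_process_admin_completions(ctrlr);
	}
	return 0;
}
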
    5401             : 
    5402             : bool
    5403          43 : spdk_nvme_ctrlr_is_discovery(struct spdk_nvme_ctrlr *ctrlr)
    5404             : {
    5405          43 :         assert(ctrlr);
    5406             : 
    5407          43 :         return !strncmp(ctrlr->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN,
    5408             :                         strlen(SPDK_NVMF_DISCOVERY_NQN));
    5409             : }
    5410             : 
    5411             : bool
    5412          20 : spdk_nvme_ctrlr_is_fabrics(struct spdk_nvme_ctrlr *ctrlr)
    5413             : {
    5414          20 :         assert(ctrlr);
    5415             : 
    5416          20 :         return spdk_nvme_trtype_is_fabrics(ctrlr->trid.trtype);
    5417             : }
    5418             : 
    5419             : int
    5420           0 : spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
    5421             :                                  uint16_t spsp, uint8_t nssf, void *payload, size_t size)
    5422             : {
    5423             :         struct nvme_completion_poll_status      *status;
    5424             :         int                                     res;
    5425             : 
    5426           0 :         status = calloc(1, sizeof(*status));
    5427           0 :         if (!status) {
    5428           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5429           0 :                 return -ENOMEM;
    5430             :         }
    5431             : 
    5432           0 :         res = spdk_nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
    5433             :                         nvme_completion_poll_cb, status);
    5434           0 :         if (res) {
    5435           0 :                 free(status);
    5436           0 :                 return res;
    5437             :         }
    5438           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5439           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_security_receive failed!\n");
    5440           0 :                 if (!status->timed_out) {
    5441           0 :                         free(status);
    5442             :                 }
    5443           0 :                 return -ENXIO;
    5444             :         }
    5445           0 :         free(status);
    5446             : 
    5447           0 :         return 0;
    5448             : }
    5449             : 
    5450             : int
    5451           0 : spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
    5452             :                               uint16_t spsp, uint8_t nssf, void *payload, size_t size)
    5453             : {
    5454             :         struct nvme_completion_poll_status      *status;
    5455             :         int                                     res;
    5456             : 
    5457           0 :         status = calloc(1, sizeof(*status));
    5458           0 :         if (!status) {
    5459           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5460           0 :                 return -ENOMEM;
    5461             :         }
    5462             : 
    5463           0 :         res = spdk_nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size,
    5464             :                                                 nvme_completion_poll_cb,
    5465             :                                                 status);
    5466           0 :         if (res) {
    5467           0 :                 free(status);
    5468           0 :                 return res;
    5469             :         }
    5470           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5471           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_security_send failed!\n");
    5472           0 :                 if (!status->timed_out) {
    5473           0 :                         free(status);
    5474             :                 }
    5475           0 :                 return -ENXIO;
    5476             :         }
    5477             : 
    5478           0 :         free(status);
    5479             : 
    5480           0 :         return 0;
    5481             : }
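
A sketch pairing the two synchronous security calls above. Security protocol 0x00 with SPSP 0 returns the list of protocols the device supports (the payload layout is defined by SPC-4, not by SPDK); a DMA-safe buffer is assumed to be required here, as with other payload-carrying admin commands.

#include "spdk/env.h"
#include "spdk/nvme.h"

static int
example_list_security_protocols(struct spdk_nvme_ctrlr *ctrlr)
{
	void *buf = spdk_dma_zmalloc(512, 0x1000, NULL);
	int rc;

	if (buf == NULL) {
		return -ENOMEM;
	}
	rc = spdk_nvme_ctrlr_security_receive(ctrlr, 0x00 /* secp */,
			0 /* spsp */, 0 /* nssf */, buf, 512);
	/* ... on success, parse the supported-protocol list in buf ... */
	spdk_dma_free(buf);
	return rc;
}
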
    5482             : 
    5483             : uint64_t
    5484           1 : spdk_nvme_ctrlr_get_flags(struct spdk_nvme_ctrlr *ctrlr)
    5485             : {
    5486           1 :         return ctrlr->flags;
    5487             : }
    5488             : 
    5489             : const struct spdk_nvme_transport_id *
    5490           0 : spdk_nvme_ctrlr_get_transport_id(struct spdk_nvme_ctrlr *ctrlr)
    5491             : {
    5492           0 :         return &ctrlr->trid;
    5493             : }
    5494             : 
    5495             : int32_t
    5496          17 : spdk_nvme_ctrlr_alloc_qid(struct spdk_nvme_ctrlr *ctrlr)
    5497             : {
    5498             :         uint32_t qid;
    5499             : 
    5500          17 :         assert(ctrlr->free_io_qids);
    5501          17 :         nvme_ctrlr_lock(ctrlr);
    5502          17 :         qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
    5503          17 :         if (qid > ctrlr->opts.num_io_queues) {
    5504           2 :                 NVME_CTRLR_ERRLOG(ctrlr, "No free I/O queue IDs\n");
    5505           2 :                 nvme_ctrlr_unlock(ctrlr);
    5506           2 :                 return -1;
    5507             :         }
    5508             : 
    5509          15 :         spdk_bit_array_clear(ctrlr->free_io_qids, qid);
    5510          15 :         nvme_ctrlr_unlock(ctrlr);
    5511          15 :         return qid;
    5512             : }
    5513             : 
    5514             : void
    5515          64 : spdk_nvme_ctrlr_free_qid(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid)
    5516             : {
    5517          64 :         assert(qid <= ctrlr->opts.num_io_queues);
    5518             : 
    5519          64 :         nvme_ctrlr_lock(ctrlr);
    5520             : 
    5521          64 :         if (spdk_likely(ctrlr->free_io_qids)) {
    5522          64 :                 spdk_bit_array_set(ctrlr->free_io_qids, qid);
    5523             :         }
    5524             : 
    5525          64 :         nvme_ctrlr_unlock(ctrlr);
    5526          64 : }
    5527             : 
    5528             : int
    5529           2 : spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
    5530             :                                    struct spdk_memory_domain **domains, int array_size)
    5531             : {
    5532           2 :         return nvme_transport_ctrlr_get_memory_domains(ctrlr, domains, array_size);
    5533             : }
    5534             : 
    5535             : int
    5536           0 : spdk_nvme_ctrlr_authenticate(struct spdk_nvme_ctrlr *ctrlr,
    5537             :                              spdk_nvme_authenticate_cb cb_fn, void *cb_ctx)
    5538             : {
    5539           0 :         return spdk_nvme_qpair_authenticate(ctrlr->adminq, cb_fn, cb_ctx);
    5540             : }

Generated by: LCOV version 1.15