LCOV - code coverage report
File: lib/nvme/nvme_ctrlr.c (top level)
Test: ut_cov_unit.info
Date: 2024-11-02 22:57:06
Coverage: Lines: 1752 of 2932 (59.8 %) | Functions: 141 of 207 (68.1 %)

          Line data    Source code
       1             : /*   SPDX-License-Identifier: BSD-3-Clause
       2             :  *   Copyright (C) 2015 Intel Corporation. All rights reserved.
       3             :  *   Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
       4             :  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
       5             :  */
       6             : 
       7             : #include "spdk/stdinc.h"
       8             : 
       9             : #include "nvme_internal.h"
      10             : #include "nvme_io_msg.h"
      11             : 
      12             : #include "spdk/env.h"
      13             : #include "spdk/string.h"
      14             : #include "spdk/endian.h"
      15             : 
      16             : struct nvme_active_ns_ctx;
      17             : 
      18             : static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
      19             :                 struct nvme_async_event_request *aer);
      20             : static void nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx);
      21             : static int nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns);
      22             : static int nvme_ctrlr_identify_ns_iocs_specific_async(struct spdk_nvme_ns *ns);
      23             : static int nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns);
      24             : static void nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr);
      25             : static void nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
      26             :                                  uint64_t timeout_in_ms);
      27             : 
      28             : static int
      29      477891 : nvme_ns_cmp(struct spdk_nvme_ns *ns1, struct spdk_nvme_ns *ns2)
      30             : {
      31      477891 :         if (ns1->id < ns2->id) {
      32      164867 :                 return -1;
      33      313024 :         } else if (ns1->id > ns2->id) {
      34      276062 :                 return 1;
      35             :         } else {
      36       36962 :                 return 0;
      37             :         }
      38      477891 : }
      39             : 
      40      650535 : RB_GENERATE_STATIC(nvme_ns_tree, spdk_nvme_ns, node, nvme_ns_cmp);
      41             : 
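                      : /*
                      :  * The namespace tree generated above is keyed by namespace ID, so lookups
                      :  * are plain sys/tree.h RB_FIND calls against a stack-allocated key. A
                      :  * minimal sketch, assuming the controller's RB_HEAD member is named 'ns'
                      :  * as in struct spdk_nvme_ctrlr:
                      :  *
                      :  *   struct spdk_nvme_ns key = { .id = nsid };
                      :  *   struct spdk_nvme_ns *ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &key);
                      :  */
                      : 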
      42             : #define nvme_ctrlr_get_reg_async(ctrlr, reg, sz, cb_fn, cb_arg) \
      43             :         nvme_transport_ctrlr_get_reg_ ## sz ## _async(ctrlr, \
      44             :                 offsetof(struct spdk_nvme_registers, reg), cb_fn, cb_arg)
      45             : 
      46             : #define nvme_ctrlr_set_reg_async(ctrlr, reg, sz, val, cb_fn, cb_arg) \
      47             :         nvme_transport_ctrlr_set_reg_ ## sz ## _async(ctrlr, \
      48             :                 offsetof(struct spdk_nvme_registers, reg), val, cb_fn, cb_arg)
      49             : 
      50             : #define nvme_ctrlr_get_cc_async(ctrlr, cb_fn, cb_arg) \
      51             :         nvme_ctrlr_get_reg_async(ctrlr, cc, 4, cb_fn, cb_arg)
      52             : 
      53             : #define nvme_ctrlr_get_csts_async(ctrlr, cb_fn, cb_arg) \
      54             :         nvme_ctrlr_get_reg_async(ctrlr, csts, 4, cb_fn, cb_arg)
      55             : 
      56             : #define nvme_ctrlr_get_cap_async(ctrlr, cb_fn, cb_arg) \
      57             :         nvme_ctrlr_get_reg_async(ctrlr, cap, 8, cb_fn, cb_arg)
      58             : 
      59             : #define nvme_ctrlr_get_vs_async(ctrlr, cb_fn, cb_arg) \
      60             :         nvme_ctrlr_get_reg_async(ctrlr, vs, 4, cb_fn, cb_arg)
      61             : 
      62             : #define nvme_ctrlr_set_cc_async(ctrlr, value, cb_fn, cb_arg) \
      63             :         nvme_ctrlr_set_reg_async(ctrlr, cc, 4, value, cb_fn, cb_arg)
      64             : 
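                      : /*
                      :  * A worked expansion of the macros above: the 'sz' argument (4 or 8) is
                      :  * token-pasted to select the 32- or 64-bit transport accessor, and 'reg'
                      :  * names a field of struct spdk_nvme_registers. For example,
                      :  * nvme_ctrlr_get_cc_async(ctrlr, cb_fn, cb_arg) expands to:
                      :  *
                      :  *   nvme_transport_ctrlr_get_reg_4_async(ctrlr,
                      :  *           offsetof(struct spdk_nvme_registers, cc), cb_fn, cb_arg);
                      :  */
                      : 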
      65             : static int
      66           0 : nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
      67             : {
      68           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
      69           0 :                                               &cc->raw);
      70             : }
      71             : 
      72             : static int
      73           0 : nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
      74             : {
      75           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
      76           0 :                                               &csts->raw);
      77             : }
      78             : 
      79             : int
      80           0 : nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
      81             : {
      82           0 :         return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
      83           0 :                                               &cap->raw);
      84             : }
      85             : 
      86             : int
      87           1 : nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
      88             : {
      89           2 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
      90           1 :                                               &vs->raw);
      91             : }
      92             : 
      93             : int
      94           0 : nvme_ctrlr_get_cmbsz(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cmbsz_register *cmbsz)
      95             : {
      96           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
      97           0 :                                               &cmbsz->raw);
      98             : }
      99             : 
     100             : int
     101           0 : nvme_ctrlr_get_pmrcap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_pmrcap_register *pmrcap)
     102             : {
     103           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
     104           0 :                                               &pmrcap->raw);
     105             : }
     106             : 
     107             : int
     108           0 : nvme_ctrlr_get_bpinfo(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bpinfo_register *bpinfo)
     109             : {
     110           0 :         return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, bpinfo.raw),
     111           0 :                                               &bpinfo->raw);
     112             : }
     113             : 
     114             : int
     115           0 : nvme_ctrlr_set_bprsel(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_bprsel_register *bprsel)
     116             : {
     117           0 :         return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, bprsel.raw),
     118           0 :                                               bprsel->raw);
     119             : }
     120             : 
     121             : int
     122           0 : nvme_ctrlr_set_bpmbl(struct spdk_nvme_ctrlr *ctrlr, uint64_t bpmbl_value)
     123             : {
     124           0 :         return nvme_transport_ctrlr_set_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, bpmbl),
     125           0 :                                               bpmbl_value);
     126             : }
     127             : 
     128             : static int
     129           0 : nvme_ctrlr_set_nssr(struct spdk_nvme_ctrlr *ctrlr, uint32_t nssr_value)
     130             : {
     131           0 :         return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, nssr),
     132           0 :                                               nssr_value);
     133             : }
     134             : 
     135             : bool
     136          33 : nvme_ctrlr_multi_iocs_enabled(struct spdk_nvme_ctrlr *ctrlr)
     137             : {
     138          35 :         return ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS &&
     139           2 :                ctrlr->opts.command_set == SPDK_NVME_CC_CSS_IOCS;
     140             : }
     141             : 
      142             : /* When fields in spdk_nvme_ctrlr_opts are changed and this function is updated, please
      143             :  * also update the nvme_ctrl_opts_init function in nvme_ctrlr.c.
     144             :  */
     145             : void
     146           2 : spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
     147             : {
     148           2 :         assert(opts);
     149             : 
     150           2 :         opts->opts_size = opts_size;
     151             : 
     152             : #define FIELD_OK(field) \
     153             :         offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size
     154             : 
     155             : #define SET_FIELD(field, value) \
     156             :         if (offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size) { \
     157             :                 opts->field = value; \
     158             :         } \
     159             : 
     160           2 :         SET_FIELD(num_io_queues, DEFAULT_MAX_IO_QUEUES);
     161           2 :         SET_FIELD(use_cmb_sqs, false);
     162           2 :         SET_FIELD(no_shn_notification, false);
     163           2 :         SET_FIELD(arb_mechanism, SPDK_NVME_CC_AMS_RR);
     164           2 :         SET_FIELD(arbitration_burst, 0);
     165           2 :         SET_FIELD(low_priority_weight, 0);
     166           2 :         SET_FIELD(medium_priority_weight, 0);
     167           2 :         SET_FIELD(high_priority_weight, 0);
     168           2 :         SET_FIELD(keep_alive_timeout_ms, MIN_KEEP_ALIVE_TIMEOUT_IN_MS);
     169           2 :         SET_FIELD(transport_retry_count, SPDK_NVME_DEFAULT_RETRY_COUNT);
     170           2 :         SET_FIELD(io_queue_size, DEFAULT_IO_QUEUE_SIZE);
     171             : 
     172           2 :         if (nvme_driver_init() == 0) {
     173           2 :                 if (FIELD_OK(hostnqn)) {
     174           1 :                         nvme_get_default_hostnqn(opts->hostnqn, sizeof(opts->hostnqn));
     175           1 :                 }
     176             : 
     177           2 :                 if (FIELD_OK(extended_host_id)) {
     178           1 :                         memcpy(opts->extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
     179             :                                sizeof(opts->extended_host_id));
     180           1 :                 }
     181             : 
     182           2 :         }
     183             : 
     184           2 :         SET_FIELD(io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
     185             : 
     186           2 :         if (FIELD_OK(src_addr)) {
     187           1 :                 memset(opts->src_addr, 0, sizeof(opts->src_addr));
     188           1 :         }
     189             : 
     190           2 :         if (FIELD_OK(src_svcid)) {
     191           1 :                 memset(opts->src_svcid, 0, sizeof(opts->src_svcid));
     192           1 :         }
     193             : 
     194           2 :         if (FIELD_OK(host_id)) {
     195           1 :                 memset(opts->host_id, 0, sizeof(opts->host_id));
     196           1 :         }
     197             : 
     198           2 :         SET_FIELD(command_set, CHAR_BIT);
     199           2 :         SET_FIELD(admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
     200           2 :         SET_FIELD(header_digest, false);
     201           2 :         SET_FIELD(data_digest, false);
     202           2 :         SET_FIELD(disable_error_logging, false);
     203           2 :         SET_FIELD(transport_ack_timeout, SPDK_NVME_DEFAULT_TRANSPORT_ACK_TIMEOUT);
     204           2 :         SET_FIELD(admin_queue_size, DEFAULT_ADMIN_QUEUE_SIZE);
     205           2 :         SET_FIELD(fabrics_connect_timeout_us, NVME_FABRIC_CONNECT_COMMAND_TIMEOUT);
     206           2 :         SET_FIELD(disable_read_ana_log_page, false);
     207           2 :         SET_FIELD(disable_read_changed_ns_list_log_page, false);
     208           2 :         SET_FIELD(tls_psk, NULL);
     209           2 :         SET_FIELD(dhchap_key, NULL);
     210           2 :         SET_FIELD(dhchap_ctrlr_key, NULL);
     211           2 :         SET_FIELD(dhchap_digests,
     212             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA256) |
     213             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA384) |
     214             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA512));
     215           2 :         SET_FIELD(dhchap_dhgroups,
     216             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_NULL) |
     217             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_2048) |
     218             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_3072) |
     219             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_4096) |
     220             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_6144) |
     221             :                   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_8192));
     222             : #undef FIELD_OK
     223             : #undef SET_FIELD
     224           2 : }
     225             : 
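                      : /*
                      :  * The FIELD_OK/SET_FIELD guards above make this function safe to call from
                      :  * binaries built against an older, smaller struct spdk_nvme_ctrlr_opts:
                      :  * only fields that fit within the caller-supplied opts_size are written.
                      :  * A sketch of the intended caller pattern (spdk_nvme_connect() is one
                      :  * consumer of the resulting opts):
                      :  *
                      :  *   struct spdk_nvme_ctrlr_opts opts;
                      :  *
                      :  *   spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
                      :  *   opts.num_io_queues = 4;    // override selected defaults
                      :  *   ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
                      :  */
                      : 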
     226             : const struct spdk_nvme_ctrlr_opts *
     227           0 : spdk_nvme_ctrlr_get_opts(struct spdk_nvme_ctrlr *ctrlr)
     228             : {
     229           0 :         return &ctrlr->opts;
     230             : }
     231             : 
     232             : /**
      233             :  * This function is called when a process allocates an I/O qpair.
     234             :  * Note: the ctrlr_lock must be held when calling this function.
     235             :  */
     236             : static void
     237          15 : nvme_ctrlr_proc_add_io_qpair(struct spdk_nvme_qpair *qpair)
     238             : {
     239             :         struct spdk_nvme_ctrlr_process  *active_proc;
     240          15 :         struct spdk_nvme_ctrlr          *ctrlr = qpair->ctrlr;
     241             : 
     242          15 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
     243          15 :         if (active_proc) {
     244           0 :                 TAILQ_INSERT_TAIL(&active_proc->allocated_io_qpairs, qpair, per_process_tailq);
     245           0 :                 qpair->active_proc = active_proc;
     246           0 :         }
     247          15 : }
     248             : 
     249             : /**
      250             :  * This function is called when a process frees an I/O qpair.
     251             :  * Note: the ctrlr_lock must be held when calling this function.
     252             :  */
     253             : static void
     254          15 : nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
     255             : {
     256             :         struct spdk_nvme_ctrlr_process  *active_proc;
     257          15 :         struct spdk_nvme_ctrlr          *ctrlr = qpair->ctrlr;
     258             :         struct spdk_nvme_qpair          *active_qpair, *tmp_qpair;
     259             : 
     260          15 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
     261          15 :         if (!active_proc) {
     262          15 :                 return;
     263             :         }
     264             : 
     265           0 :         TAILQ_FOREACH_SAFE(active_qpair, &active_proc->allocated_io_qpairs,
     266             :                            per_process_tailq, tmp_qpair) {
     267           0 :                 if (active_qpair == qpair) {
     268           0 :                         TAILQ_REMOVE(&active_proc->allocated_io_qpairs,
     269             :                                      active_qpair, per_process_tailq);
     270             : 
     271           0 :                         break;
     272             :                 }
     273           0 :         }
     274          15 : }
     275             : 
     276             : void
     277          27 : spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
     278             :                 struct spdk_nvme_io_qpair_opts *opts,
     279             :                 size_t opts_size)
     280             : {
     281          27 :         assert(ctrlr);
     282             : 
     283          27 :         assert(opts);
     284             : 
     285          27 :         memset(opts, 0, opts_size);
     286             : 
     287             : #define FIELD_OK(field) \
     288             :         offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size
     289             : 
     290          27 :         if (FIELD_OK(qprio)) {
     291          27 :                 opts->qprio = SPDK_NVME_QPRIO_URGENT;
     292          27 :         }
     293             : 
     294          27 :         if (FIELD_OK(io_queue_size)) {
     295          27 :                 opts->io_queue_size = ctrlr->opts.io_queue_size;
     296          27 :         }
     297             : 
     298          27 :         if (FIELD_OK(io_queue_requests)) {
     299          26 :                 opts->io_queue_requests = ctrlr->opts.io_queue_requests;
     300          26 :         }
     301             : 
     302          27 :         if (FIELD_OK(delay_cmd_submit)) {
     303          26 :                 opts->delay_cmd_submit = false;
     304          26 :         }
     305             : 
     306          27 :         if (FIELD_OK(sq.vaddr)) {
     307          26 :                 opts->sq.vaddr = NULL;
     308          26 :         }
     309             : 
     310          27 :         if (FIELD_OK(sq.paddr)) {
     311          26 :                 opts->sq.paddr = 0;
     312          26 :         }
     313             : 
     314          27 :         if (FIELD_OK(sq.buffer_size)) {
     315          26 :                 opts->sq.buffer_size = 0;
     316          26 :         }
     317             : 
     318          27 :         if (FIELD_OK(cq.vaddr)) {
     319          26 :                 opts->cq.vaddr = NULL;
     320          26 :         }
     321             : 
     322          27 :         if (FIELD_OK(cq.paddr)) {
     323          26 :                 opts->cq.paddr = 0;
     324          26 :         }
     325             : 
     326          27 :         if (FIELD_OK(cq.buffer_size)) {
     327          26 :                 opts->cq.buffer_size = 0;
     328          26 :         }
     329             : 
     330          27 :         if (FIELD_OK(create_only)) {
     331          26 :                 opts->create_only = false;
     332          26 :         }
     333             : 
     334          27 :         if (FIELD_OK(async_mode)) {
     335          26 :                 opts->async_mode = false;
     336          26 :         }
     337             : 
     338             : #undef FIELD_OK
     339          27 : }
     340             : 
     341             : static struct spdk_nvme_qpair *
     342          22 : nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
     343             :                            const struct spdk_nvme_io_qpair_opts *opts)
     344             : {
     345             :         int32_t                                 qid;
     346             :         struct spdk_nvme_qpair                  *qpair;
     347             :         union spdk_nvme_cc_register             cc;
     348             : 
     349          22 :         if (!ctrlr) {
     350           0 :                 return NULL;
     351             :         }
     352             : 
     353          22 :         nvme_ctrlr_lock(ctrlr);
     354          22 :         cc.raw = ctrlr->process_init_cc.raw;
     355             : 
     356          22 :         if (opts->qprio & ~SPDK_NVME_CREATE_IO_SQ_QPRIO_MASK) {
     357           2 :                 nvme_ctrlr_unlock(ctrlr);
     358           2 :                 return NULL;
     359             :         }
     360             : 
     361             :         /*
      362             :          * Only the value SPDK_NVME_QPRIO_URGENT (0) is valid for the
      363             :          * default round-robin arbitration method.
     364             :          */
     365          20 :         if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts->qprio != SPDK_NVME_QPRIO_URGENT)) {
     366           3 :                 NVME_CTRLR_ERRLOG(ctrlr, "invalid queue priority for default round robin arbitration method\n");
     367           3 :                 nvme_ctrlr_unlock(ctrlr);
     368           3 :                 return NULL;
     369             :         }
     370             : 
     371          17 :         qid = spdk_nvme_ctrlr_alloc_qid(ctrlr);
     372          17 :         if (qid < 0) {
     373           2 :                 nvme_ctrlr_unlock(ctrlr);
     374           2 :                 return NULL;
     375             :         }
     376             : 
     377          15 :         qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, opts);
     378          15 :         if (qpair == NULL) {
     379           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_transport_ctrlr_create_io_qpair() failed\n");
     380           0 :                 spdk_nvme_ctrlr_free_qid(ctrlr, qid);
     381           0 :                 nvme_ctrlr_unlock(ctrlr);
     382           0 :                 return NULL;
     383             :         }
     384             : 
     385          15 :         TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
     386             : 
     387          15 :         nvme_ctrlr_proc_add_io_qpair(qpair);
     388             : 
     389          15 :         nvme_ctrlr_unlock(ctrlr);
     390             : 
     391          15 :         return qpair;
     392          22 : }
     393             : 
     394             : int
     395          15 : spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
     396             : {
     397             :         int rc;
     398             : 
     399          15 :         if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
     400           0 :                 return -EISCONN;
     401             :         }
     402             : 
     403          15 :         nvme_ctrlr_lock(ctrlr);
     404          15 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
     405          15 :         nvme_ctrlr_unlock(ctrlr);
     406             : 
     407          15 :         if (ctrlr->quirks & NVME_QUIRK_DELAY_AFTER_QUEUE_ALLOC) {
     408           0 :                 spdk_delay_us(100);
     409           0 :         }
     410             : 
     411          15 :         return rc;
     412          15 : }
     413             : 
     414             : void
     415           0 : spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
     416             : {
     417           0 :         struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
     418             : 
     419           0 :         nvme_ctrlr_lock(ctrlr);
     420           0 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     421           0 :         nvme_ctrlr_unlock(ctrlr);
     422           0 : }
     423             : 
     424             : struct spdk_nvme_qpair *
     425          23 : spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
     426             :                                const struct spdk_nvme_io_qpair_opts *user_opts,
     427             :                                size_t opts_size)
     428             : {
     429             : 
     430          23 :         struct spdk_nvme_qpair          *qpair = NULL;
     431             :         struct spdk_nvme_io_qpair_opts  opts;
     432             :         int                             rc;
     433             : 
     434          23 :         nvme_ctrlr_lock(ctrlr);
     435             : 
     436          23 :         if (spdk_unlikely(ctrlr->state != NVME_CTRLR_STATE_READY)) {
      437             :                 /* When the controller is resetting or initializing, free_io_qids has been deleted or
      438             :                  * not yet created. We can't create an I/O qpair in that case. */
     439           1 :                 goto unlock;
     440             :         }
     441             : 
     442             :         /*
     443             :          * Get the default options, then overwrite them with the user-provided options
     444             :          * up to opts_size.
     445             :          *
     446             :          * This allows for extensions of the opts structure without breaking
     447             :          * ABI compatibility.
     448             :          */
     449          22 :         spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
     450          22 :         if (user_opts) {
     451          18 :                 memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
     452             : 
     453             :                 /* If user passes buffers, make sure they're big enough for the requested queue size */
     454          18 :                 if (opts.sq.vaddr) {
     455           0 :                         if (opts.sq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cmd))) {
     456           0 :                                 NVME_CTRLR_ERRLOG(ctrlr, "sq buffer size %" PRIx64 " is too small for sq size %zx\n",
     457             :                                                   opts.sq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cmd)));
     458           0 :                                 goto unlock;
     459             :                         }
     460           0 :                 }
     461          18 :                 if (opts.cq.vaddr) {
     462           0 :                         if (opts.cq.buffer_size < (opts.io_queue_size * sizeof(struct spdk_nvme_cpl))) {
     463           0 :                                 NVME_CTRLR_ERRLOG(ctrlr, "cq buffer size %" PRIx64 " is too small for cq size %zx\n",
     464             :                                                   opts.cq.buffer_size, (opts.io_queue_size * sizeof(struct spdk_nvme_cpl)));
     465           0 :                                 goto unlock;
     466             :                         }
     467           0 :                 }
     468          18 :         }
     469             : 
     470          22 :         qpair = nvme_ctrlr_create_io_qpair(ctrlr, &opts);
     471             : 
     472          22 :         if (qpair == NULL || opts.create_only == true) {
     473           7 :                 goto unlock;
     474             :         }
     475             : 
     476          15 :         rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
     477          15 :         if (rc != 0) {
     478           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_transport_ctrlr_connect_io_qpair() failed\n");
     479           1 :                 nvme_ctrlr_proc_remove_io_qpair(qpair);
     480           1 :                 TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
     481           1 :                 spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
     482           1 :                 nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
     483           1 :                 qpair = NULL;
     484           1 :                 goto unlock;
     485             :         }
     486             : 
     487             : unlock:
     488          23 :         nvme_ctrlr_unlock(ctrlr);
     489             : 
     490          23 :         return qpair;
     491             : }
     492             : 
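                      : /*
                      :  * A minimal usage sketch for the function above: fetch the defaults, tweak
                      :  * them, then allocate. Because create_only defaults to false, the returned
                      :  * qpair is already connected:
                      :  *
                      :  *   struct spdk_nvme_io_qpair_opts opts;
                      :  *   struct spdk_nvme_qpair *qpair;
                      :  *
                      :  *   spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
                      :  *   opts.io_queue_size = 256;
                      :  *   qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
                      :  *   if (qpair == NULL) {
                      :  *           // handle allocation or connect failure
                      :  *   }
                      :  */
                      : 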
     493             : int
     494           8 : spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
     495             : {
     496             :         struct spdk_nvme_ctrlr *ctrlr;
     497             :         enum nvme_qpair_state qpair_state;
     498             :         int rc;
     499             : 
     500           8 :         assert(qpair != NULL);
     501           8 :         assert(nvme_qpair_is_admin_queue(qpair) == false);
     502           8 :         assert(qpair->ctrlr != NULL);
     503             : 
     504           8 :         ctrlr = qpair->ctrlr;
     505           8 :         nvme_ctrlr_lock(ctrlr);
     506           8 :         qpair_state = nvme_qpair_get_state(qpair);
     507             : 
     508           8 :         if (ctrlr->is_removed) {
     509           2 :                 rc = -ENODEV;
     510           2 :                 goto out;
     511             :         }
     512             : 
     513           6 :         if (ctrlr->is_resetting || qpair_state == NVME_QPAIR_DISCONNECTING) {
     514           2 :                 rc = -EAGAIN;
     515           2 :                 goto out;
     516             :         }
     517             : 
     518           4 :         if (ctrlr->is_failed || qpair_state == NVME_QPAIR_DESTROYING) {
     519           2 :                 rc = -ENXIO;
     520           2 :                 goto out;
     521             :         }
     522             : 
     523           2 :         if (qpair_state != NVME_QPAIR_DISCONNECTED) {
     524           1 :                 rc = 0;
     525           1 :                 goto out;
     526             :         }
     527             : 
     528           1 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
     529           1 :         if (rc) {
     530           0 :                 rc = -EAGAIN;
     531           0 :                 goto out;
     532             :         }
     533             : 
     534             : out:
     535           8 :         nvme_ctrlr_unlock(ctrlr);
     536           8 :         return rc;
     537             : }
     538             : 
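                      : /*
                      :  * The error codes above are meant to drive a caller-side retry policy:
                      :  * -EAGAIN is transient (controller resetting or qpair still disconnecting),
                      :  * while -ENODEV and -ENXIO are fatal for this qpair. A hedged sketch:
                      :  *
                      :  *   rc = spdk_nvme_ctrlr_reconnect_io_qpair(qpair);
                      :  *   if (rc == -EAGAIN) {
                      :  *           // transient: poll completions and retry later
                      :  *   } else if (rc != 0) {
                      :  *           // fatal for this qpair: tear it down
                      :  *           spdk_nvme_ctrlr_free_io_qpair(qpair);
                      :  *   }
                      :  */
                      : 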
     539             : spdk_nvme_qp_failure_reason
     540           0 : spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
     541             : {
     542           0 :         return ctrlr->adminq->transport_failure_reason;
     543             : }
     544             : 
     545             : /*
     546             :  * This internal function will attempt to take the controller
     547             :  * lock before calling disconnect on a controller qpair.
     548             :  * Functions already holding the controller lock should
     549             :  * call nvme_transport_ctrlr_disconnect_qpair directly.
     550             :  */
     551             : void
     552           0 : nvme_ctrlr_disconnect_qpair(struct spdk_nvme_qpair *qpair)
     553             : {
     554           0 :         struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
     555             : 
     556           0 :         assert(ctrlr != NULL);
     557           0 :         nvme_ctrlr_lock(ctrlr);
     558           0 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     559           0 :         nvme_ctrlr_unlock(ctrlr);
     560           0 : }
     561             : 
     562             : int
     563          14 : spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
     564             : {
     565             :         struct spdk_nvme_ctrlr *ctrlr;
     566             : 
     567          14 :         if (qpair == NULL) {
     568           0 :                 return 0;
     569             :         }
     570             : 
     571          14 :         ctrlr = qpair->ctrlr;
     572             : 
     573          14 :         if (qpair->in_completion_context) {
     574             :                 /*
      575             :                  * There are many cases where it is convenient to delete an I/O qpair in the context
     576             :                  *  of that qpair's completion routine.  To handle this properly, set a flag here
     577             :                  *  so that the completion routine will perform an actual delete after the context
     578             :                  *  unwinds.
     579             :                  */
     580           0 :                 qpair->delete_after_completion_context = 1;
     581           0 :                 return 0;
     582             :         }
     583             : 
     584          14 :         if (qpair->auth.cb_fn != NULL) {
     585           0 :                 qpair->auth.cb_fn(qpair->auth.cb_ctx, -ECANCELED);
     586           0 :                 qpair->auth.cb_fn = NULL;
     587           0 :         }
     588             : 
     589          14 :         qpair->destroy_in_progress = 1;
     590             : 
     591          14 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
     592             : 
     593          14 :         if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
     594           0 :                 spdk_nvme_poll_group_remove(qpair->poll_group->group, qpair);
     595           0 :         }
     596             : 
     597             :         /* Do not retry. */
     598          14 :         nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING);
     599             : 
     600             :         /* In the multi-process case, a process may call this function on a foreign
      601             :          * I/O qpair (i.e. one that this process did not create) when that qpair's process
     602             :          * exits unexpectedly.  In that case, we must not try to abort any reqs associated
     603             :          * with that qpair, since the callbacks will also be foreign to this process.
     604             :          */
     605          14 :         if (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr)) {
     606          14 :                 nvme_qpair_abort_all_queued_reqs(qpair);
     607          14 :         }
     608             : 
     609          14 :         nvme_ctrlr_lock(ctrlr);
     610             : 
     611          14 :         nvme_ctrlr_proc_remove_io_qpair(qpair);
     612             : 
     613          14 :         TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
     614          14 :         spdk_nvme_ctrlr_free_qid(ctrlr, qpair->id);
     615             : 
     616          14 :         nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair);
     617          14 :         nvme_ctrlr_unlock(ctrlr);
     618          14 :         return 0;
     619          14 : }
     620             : 
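                      : /*
                      :  * The delete_after_completion_context flag above is what makes it legal to
                      :  * free a qpair from inside its own completion callback. A sketch, where
                      :  * my_io_done is a hypothetical spdk_nvme_cmd_cb completion callback:
                      :  *
                      :  *   static void
                      :  *   my_io_done(void *arg, const struct spdk_nvme_cpl *cpl)
                      :  *   {
                      :  *           struct spdk_nvme_qpair *qpair = arg;
                      :  *
                      :  *           // only sets the flag; the actual teardown happens once
                      :  *           // the completion context unwinds
                      :  *           spdk_nvme_ctrlr_free_io_qpair(qpair);
                      :  *   }
                      :  */
                      : 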
     621             : static void
     622           3 : nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
     623             :                 struct spdk_nvme_intel_log_page_directory *log_page_directory)
     624             : {
     625           3 :         if (log_page_directory == NULL) {
     626           0 :                 return;
     627             :         }
     628             : 
     629           3 :         assert(ctrlr->cdata.vid == SPDK_PCI_VID_INTEL);
     630             : 
     631           3 :         ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY] = true;
     632             : 
     633           3 :         if (log_page_directory->read_latency_log_len ||
     634           2 :             (ctrlr->quirks & NVME_INTEL_QUIRK_READ_LATENCY)) {
     635           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY] = true;
     636           2 :         }
     637           3 :         if (log_page_directory->write_latency_log_len ||
     638           2 :             (ctrlr->quirks & NVME_INTEL_QUIRK_WRITE_LATENCY)) {
     639           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY] = true;
     640           2 :         }
     641           3 :         if (log_page_directory->temperature_statistics_log_len) {
     642           2 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_TEMPERATURE] = true;
     643           2 :         }
     644           3 :         if (log_page_directory->smart_log_len) {
     645           1 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_LOG_SMART] = true;
     646           1 :         }
     647           3 :         if (log_page_directory->marketing_description_log_len) {
     648           1 :                 ctrlr->log_page_supported[SPDK_NVME_INTEL_MARKETING_DESCRIPTION] = true;
     649           1 :         }
     650           3 : }
     651             : 
     652             : struct intel_log_pages_ctx {
     653             :         struct spdk_nvme_intel_log_page_directory log_page_directory;
     654             :         struct spdk_nvme_ctrlr *ctrlr;
     655             : };
     656             : 
     657             : static void
     658           1 : nvme_ctrlr_set_intel_support_log_pages_done(void *arg, const struct spdk_nvme_cpl *cpl)
     659             : {
     660           1 :         struct intel_log_pages_ctx *ctx = arg;
     661           1 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
     662             : 
     663           1 :         if (!spdk_nvme_cpl_is_error(cpl)) {
     664           1 :                 nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, &ctx->log_page_directory);
     665           1 :         }
     666             : 
     667           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     668           1 :                              ctrlr->opts.admin_timeout_ms);
     669           1 :         free(ctx);
     670           1 : }
     671             : 
     672             : static int
     673           1 : nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
     674             : {
     675           1 :         int rc = 0;
     676             :         struct intel_log_pages_ctx *ctx;
     677             : 
     678           1 :         ctx = calloc(1, sizeof(*ctx));
     679           1 :         if (!ctx) {
     680           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     681           0 :                                      ctrlr->opts.admin_timeout_ms);
     682           0 :                 return 0;
     683             :         }
     684             : 
     685           1 :         ctx->ctrlr = ctrlr;
     686             : 
     687           2 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
     688           1 :                                               SPDK_NVME_GLOBAL_NS_TAG, &ctx->log_page_directory,
     689             :                                               sizeof(struct spdk_nvme_intel_log_page_directory),
     690           1 :                                               0, nvme_ctrlr_set_intel_support_log_pages_done, ctx);
     691           1 :         if (rc != 0) {
     692           0 :                 free(ctx);
     693           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     694           0 :                                      ctrlr->opts.admin_timeout_ms);
     695           0 :                 return 0;
     696             :         }
     697             : 
     698           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES,
     699           1 :                              ctrlr->opts.admin_timeout_ms);
     700             : 
     701           1 :         return 0;
     702           1 : }
     703             : 
     704             : static int
     705           4 : nvme_ctrlr_alloc_ana_log_page(struct spdk_nvme_ctrlr *ctrlr)
     706             : {
     707             :         uint32_t ana_log_page_size;
     708             : 
     709           8 :         ana_log_page_size = sizeof(struct spdk_nvme_ana_page) + ctrlr->cdata.nanagrpid *
     710           4 :                             sizeof(struct spdk_nvme_ana_group_descriptor) + ctrlr->active_ns_count *
     711             :                             sizeof(uint32_t);
     712             : 
     713             :         /* Number of active namespaces may have changed.
     714             :          * Check if ANA log page fits into existing buffer.
     715             :          */
     716           4 :         if (ana_log_page_size > ctrlr->ana_log_page_size) {
     717             :                 void *new_buffer;
     718             : 
     719           4 :                 if (ctrlr->ana_log_page) {
     720           1 :                         new_buffer = realloc(ctrlr->ana_log_page, ana_log_page_size);
     721           1 :                 } else {
     722           3 :                         new_buffer = calloc(1, ana_log_page_size);
     723             :                 }
     724             : 
     725           4 :                 if (!new_buffer) {
     726           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "could not allocate ANA log page buffer, size %u\n",
     727             :                                           ana_log_page_size);
     728           0 :                         return -ENXIO;
     729             :                 }
     730             : 
     731           4 :                 ctrlr->ana_log_page = new_buffer;
     732           4 :                 if (ctrlr->copied_ana_desc) {
     733           1 :                         new_buffer = realloc(ctrlr->copied_ana_desc, ana_log_page_size);
     734           1 :                 } else {
     735           3 :                         new_buffer = calloc(1, ana_log_page_size);
     736             :                 }
     737             : 
     738           4 :                 if (!new_buffer) {
     739           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "could not allocate a buffer to parse ANA descriptor, size %u\n",
     740             :                                           ana_log_page_size);
     741           0 :                         return -ENOMEM;
     742             :                 }
     743             : 
     744           4 :                 ctrlr->copied_ana_desc = new_buffer;
     745           4 :                 ctrlr->ana_log_page_size = ana_log_page_size;
     746           4 :         }
     747             : 
     748           4 :         return 0;
     749           4 : }
     750             : 
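                      : /*
                      :  * Worked example of the sizing above: with cdata.nanagrpid = 2 ANA groups
                      :  * and active_ns_count = 4 active namespaces, the buffer holds
                      :  *
                      :  *   sizeof(struct spdk_nvme_ana_page)                      // fixed header
                      :  *   + 2 * sizeof(struct spdk_nvme_ana_group_descriptor)   // one per group
                      :  *   + 4 * sizeof(uint32_t)                                 // one NSID slot each
                      :  *
                      :  * A single shared NSID array suffices because each active namespace belongs
                      :  * to at most one ANA group.
                      :  */
                      : 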
     751             : static int
     752           4 : nvme_ctrlr_update_ana_log_page(struct spdk_nvme_ctrlr *ctrlr)
     753             : {
     754             :         struct nvme_completion_poll_status *status;
     755             :         int rc;
     756             : 
     757           4 :         rc = nvme_ctrlr_alloc_ana_log_page(ctrlr);
     758           4 :         if (rc != 0) {
     759           0 :                 return rc;
     760             :         }
     761             : 
     762           4 :         status = calloc(1, sizeof(*status));
     763           4 :         if (status == NULL) {
     764           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
     765           0 :                 return -ENOMEM;
     766             :         }
     767             : 
     768           8 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS,
     769           4 :                                               SPDK_NVME_GLOBAL_NS_TAG, ctrlr->ana_log_page,
     770           4 :                                               ctrlr->ana_log_page_size, 0,
     771           4 :                                               nvme_completion_poll_cb, status);
     772           4 :         if (rc != 0) {
     773           0 :                 free(status);
     774           0 :                 return rc;
     775             :         }
     776             : 
     777           8 :         if (nvme_wait_for_completion_robust_lock_timeout(ctrlr->adminq, status, &ctrlr->ctrlr_lock,
     778           4 :                         ctrlr->opts.admin_timeout_ms * 1000)) {
     779           0 :                 if (!status->timed_out) {
     780           0 :                         free(status);
     781           0 :                 }
     782           0 :                 return -EIO;
     783             :         }
     784             : 
     785           4 :         free(status);
     786           4 :         return 0;
     787           4 : }
     788             : 
     789             : static int
     790           5 : nvme_ctrlr_update_ns_ana_states(const struct spdk_nvme_ana_group_descriptor *desc,
     791             :                                 void *cb_arg)
     792             : {
     793           5 :         struct spdk_nvme_ctrlr *ctrlr = cb_arg;
     794             :         struct spdk_nvme_ns *ns;
     795             :         uint32_t i, nsid;
     796             : 
     797          14 :         for (i = 0; i < desc->num_of_nsid; i++) {
     798           9 :                 nsid = desc->nsid[i];
     799           9 :                 if (nsid == 0 || nsid > ctrlr->cdata.nn) {
     800           0 :                         continue;
     801             :                 }
     802             : 
     803           9 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
     804           9 :                 assert(ns != NULL);
     805             : 
     806           9 :                 ns->ana_group_id = desc->ana_group_id;
     807           9 :                 ns->ana_state = desc->ana_state;
     808           9 :         }
     809             : 
     810           5 :         return 0;
     811             : }
     812             : 
     813             : int
     814           4 : nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
     815             :                               spdk_nvme_parse_ana_log_page_cb cb_fn, void *cb_arg)
     816             : {
     817             :         struct spdk_nvme_ana_group_descriptor *copied_desc;
     818             :         uint8_t *orig_desc;
     819             :         uint32_t i, desc_size, copy_len;
     820           4 :         int rc = 0;
     821             : 
     822           4 :         if (ctrlr->ana_log_page == NULL) {
     823           0 :                 return -EINVAL;
     824             :         }
     825             : 
     826           4 :         copied_desc = ctrlr->copied_ana_desc;
     827             : 
     828           4 :         orig_desc = (uint8_t *)ctrlr->ana_log_page + sizeof(struct spdk_nvme_ana_page);
     829           4 :         copy_len = ctrlr->ana_log_page_size - sizeof(struct spdk_nvme_ana_page);
     830             : 
     831           9 :         for (i = 0; i < ctrlr->ana_log_page->num_ana_group_desc; i++) {
     832           5 :                 memcpy(copied_desc, orig_desc, copy_len);
     833             : 
     834           5 :                 rc = cb_fn(copied_desc, cb_arg);
     835           5 :                 if (rc != 0) {
     836           0 :                         break;
     837             :                 }
     838             : 
     839           5 :                 desc_size = sizeof(struct spdk_nvme_ana_group_descriptor) +
     840           5 :                             copied_desc->num_of_nsid * sizeof(uint32_t);
     841           5 :                 orig_desc += desc_size;
     842           5 :                 copy_len -= desc_size;
     843           5 :         }
     844             : 
     845           4 :         return rc;
     846           4 : }
     847             : 
     848             : static int
     849          16 : nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
     850             : {
     851          16 :         int     rc = 0;
     852             : 
     853          16 :         memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
     854             :         /* Mandatory pages */
     855          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true;
     856          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true;
     857          16 :         ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true;
     858          16 :         if (ctrlr->cdata.lpa.celp) {
     859           1 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true;
     860           1 :         }
     861             : 
     862          16 :         if (ctrlr->cdata.cmic.ana_reporting) {
     863           2 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS] = true;
     864           2 :                 if (!ctrlr->opts.disable_read_ana_log_page) {
     865           2 :                         rc = nvme_ctrlr_update_ana_log_page(ctrlr);
     866           2 :                         if (rc == 0) {
     867           4 :                                 nvme_ctrlr_parse_ana_log_page(ctrlr, nvme_ctrlr_update_ns_ana_states,
     868           2 :                                                               ctrlr);
     869           2 :                         }
     870           2 :                 }
     871           2 :         }
     872             : 
     873          16 :         if (ctrlr->cdata.ctratt.bits.fdps) {
     874           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_CONFIGURATIONS] = true;
     875           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_RECLAIM_UNIT_HANDLE_USAGE] = true;
     876           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_STATISTICS] = true;
     877           0 :                 ctrlr->log_page_supported[SPDK_NVME_LOG_FDP_EVENTS] = true;
     878           0 :         }
     879             : 
     880          17 :         if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL &&
     881           1 :             ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE &&
     882           1 :             !(ctrlr->quirks & NVME_INTEL_QUIRK_NO_LOG_PAGES)) {
     883           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES,
     884           1 :                                      ctrlr->opts.admin_timeout_ms);
     885             : 
     886           1 :         } else {
     887          30 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
     888          15 :                                      ctrlr->opts.admin_timeout_ms);
     889             : 
     890             :         }
     891             : 
     892          16 :         return rc;
     893             : }
     894             : 
     895             : static void
     896           1 : nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
     897             : {
     898           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
     899           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
     900           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_POWER_GOVERNOR_SETTING] = true;
     901           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_SMBUS_ADDRESS] = true;
     902           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LED_PATTERN] = true;
     903           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_RESET_TIMED_WORKLOAD_COUNTERS] = true;
     904           1 :         ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING] = true;
     905           1 : }
     906             : 
     907             : static void
     908          18 : nvme_ctrlr_set_arbitration_feature(struct spdk_nvme_ctrlr *ctrlr)
     909             : {
     910             :         uint32_t cdw11;
     911             :         struct nvme_completion_poll_status *status;
     912             : 
     913          18 :         if (ctrlr->opts.arbitration_burst == 0) {
     914          16 :                 return;
     915             :         }
     916             : 
     917           2 :         if (ctrlr->opts.arbitration_burst > 7) {
      918           1 :                 NVME_CTRLR_WARNLOG(ctrlr, "Valid arbitration burst values are from 0-7\n");
     919           1 :                 return;
     920             :         }
     921             : 
     922           1 :         status = calloc(1, sizeof(*status));
     923           1 :         if (!status) {
     924           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
     925           0 :                 return;
     926             :         }
     927             : 
     928           1 :         cdw11 = ctrlr->opts.arbitration_burst;
     929             : 
     930           1 :         if (spdk_nvme_ctrlr_get_flags(ctrlr) & SPDK_NVME_CTRLR_WRR_SUPPORTED) {
     931           1 :                 cdw11 |= (uint32_t)ctrlr->opts.low_priority_weight << 8;
     932           1 :                 cdw11 |= (uint32_t)ctrlr->opts.medium_priority_weight << 16;
     933           1 :                 cdw11 |= (uint32_t)ctrlr->opts.high_priority_weight << 24;
     934           1 :         }
     935             : 
     936           3 :         if (spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ARBITRATION,
     937           1 :                                             cdw11, 0, NULL, 0,
     938           2 :                                             nvme_completion_poll_cb, status) < 0) {
     939           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set arbitration feature failed\n");
     940           0 :                 free(status);
     941           0 :                 return;
     942             :         }
     943             : 
     944           2 :         if (nvme_wait_for_completion_timeout(ctrlr->adminq, status,
     945           1 :                                              ctrlr->opts.admin_timeout_ms * 1000)) {
      946           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Timed out setting arbitration feature\n");
     947           0 :         }
     948             : 
     949           1 :         if (!status->timed_out) {
     950           1 :                 free(status);
     951           1 :         }
     952          18 : }
     953             : 
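                      : /*
                      :  * Worked example of the cdw11 packing above, following the Arbitration
                      :  * feature layout (burst in bits 2:0, then LPW, MPW and HPW in bits 15:8,
                      :  * 23:16 and 31:24). With arbitration_burst = 4 and weights 8/16/32:
                      :  *
                      :  *   cdw11 = 4 | (8u << 8) | (16u << 16) | (32u << 24);   // 0x20100804
                      :  */
                      : 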
     954             : static void
     955          16 : nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
     956             : {
     957          16 :         memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
     958             :         /* Mandatory features */
     959          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true;
     960          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true;
     961          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true;
     962          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true;
     963          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true;
     964          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true;
     965          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true;
     966          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true;
     967          16 :         ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true;
     968             :         /* Optional features */
     969          16 :         if (ctrlr->cdata.vwc.present) {
     970           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true;
     971           0 :         }
     972          16 :         if (ctrlr->cdata.apsta.supported) {
     973           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true;
     974           0 :         }
     975          16 :         if (ctrlr->cdata.hmpre) {
     976           0 :                 ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true;
     977           0 :         }
     978          16 :         if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) {
     979           1 :                 nvme_ctrlr_set_intel_supported_features(ctrlr);
     980           1 :         }
     981             : 
     982          16 :         nvme_ctrlr_set_arbitration_feature(ctrlr);
     983          16 : }
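
/*
 * Usage sketch (example_check_vwc is hypothetical): once initialization
 * completes, applications can query the table populated above through the
 * public spdk_nvme_ctrlr_is_feature_supported() API.
 */
static void
example_check_vwc(struct spdk_nvme_ctrlr *ctrlr)
{
        if (spdk_nvme_ctrlr_is_feature_supported(ctrlr, SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE)) {
                printf("controller exposes a volatile write cache\n");
        }
}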
     984             : 
     985             : static void
     986           1 : nvme_ctrlr_set_host_feature_done(void *arg, const struct spdk_nvme_cpl *cpl)
     987             : {
     988           1 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
     989             : 
     990           1 :         spdk_free(ctrlr->tmp_ptr);
     991           1 :         ctrlr->tmp_ptr = NULL;
     992             : 
     993           1 :         if (spdk_nvme_cpl_is_error(cpl)) {
     994           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set host behavior support feature failed: SC %x SCT %x\n",
     995             :                                   cpl->status.sc, cpl->status.sct);
     996           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
     997           0 :                 return;
     998             :         }
     999             : 
    1000           1 :         ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT] = true;
    1001             : 
    1002           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
    1003           1 :                              ctrlr->opts.admin_timeout_ms);
    1004           1 : }
    1005             : 
    1006             : /* We no longer want to add synchronous operations here.
    1007             :  * We set the Host Behavior Support feature asynchronously in different states.
    1008             :  */
    1009             : static int
    1010          16 : nvme_ctrlr_set_host_feature(struct spdk_nvme_ctrlr *ctrlr)
    1011             : {
    1012             :         struct spdk_nvme_host_behavior *host;
    1013             :         int rc;
    1014             : 
    1015          16 :         if (!ctrlr->cdata.ctratt.bits.elbas) {
    1016          30 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
    1017          15 :                                      ctrlr->opts.admin_timeout_ms);
    1018          15 :                 return 0;
    1019             :         }
    1020             : 
    1021           1 :         ctrlr->tmp_ptr = spdk_dma_zmalloc(sizeof(struct spdk_nvme_host_behavior), 4096, NULL);
    1022           1 :         if (!ctrlr->tmp_ptr) {
    1023           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate host behavior support data\n");
    1024           0 :                 rc = -ENOMEM;
    1025           0 :                 goto error;
    1026             :         }
    1027             : 
    1028           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE,
    1029           1 :                              ctrlr->opts.admin_timeout_ms);
    1030             : 
    1031           1 :         host = ctrlr->tmp_ptr;
    1032             : 
    1033           1 :         host->lbafee = 1;
    1034             : 
    1035           2 :         rc = spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT,
    1036           1 :                                              0, 0, host, sizeof(struct spdk_nvme_host_behavior),
    1037           1 :                                              nvme_ctrlr_set_host_feature_done, ctrlr);
    1038           1 :         if (rc != 0) {
    1039           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set host behavior support feature failed: %d\n", rc);
    1040           0 :                 goto error;
    1041             :         }
    1042             : 
    1043           1 :         return 0;
    1044             : 
    1045             : error:
    1046           0 :         spdk_free(ctrlr->tmp_ptr);
    1047           0 :         ctrlr->tmp_ptr = NULL;
    1048             : 
    1049           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1050           0 :         return rc;
    1051          16 : }
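
/*
 * A minimal sketch of reading the same feature back through the public API;
 * example_get_host_behavior and its callback are hypothetical, and the
 * payload must be a DMA-able allocation just like ctrlr->tmp_ptr above.
 */
static int
example_get_host_behavior(struct spdk_nvme_ctrlr *ctrlr,
                          struct spdk_nvme_host_behavior *host,
                          spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT,
                                               0, host, sizeof(*host), cb_fn, cb_arg);
}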
    1052             : 
    1053             : bool
    1054           0 : spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
    1055             : {
    1056           0 :         return ctrlr->is_failed;
    1057             : }
    1058             : 
    1059             : void
    1060           1 : nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
    1061             : {
    1062             :         /*
    1063             :          * Set the flag here and leave the actual failing of the qpairs to
    1064             :          * spdk_nvme_qpair_process_completions().
    1065             :          */
    1066           1 :         if (hot_remove) {
    1067           0 :                 ctrlr->is_removed = true;
    1068           0 :         }
    1069             : 
    1070           1 :         if (ctrlr->is_failed) {
    1071           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "already in failed state\n");
    1072           0 :                 return;
    1073             :         }
    1074             : 
    1075           1 :         if (ctrlr->is_disconnecting) {
    1076           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "already disconnecting\n");
    1077           0 :                 return;
    1078             :         }
    1079             : 
    1080           1 :         ctrlr->is_failed = true;
    1081           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1082           1 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    1083           1 :         NVME_CTRLR_ERRLOG(ctrlr, "in failed state.\n");
    1084           1 : }
    1085             : 
    1086             : /**
    1087             :  * This public API function will try to take the controller lock.
    1088             :  * Any private functions being called from a thread already holding
    1089             :  * the ctrlr lock should call nvme_ctrlr_fail directly.
    1090             :  */
    1091             : void
    1092           0 : spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
    1093             : {
    1094           0 :         nvme_ctrlr_lock(ctrlr);
    1095           0 :         nvme_ctrlr_fail(ctrlr, false);
    1096           0 :         nvme_ctrlr_unlock(ctrlr);
    1097           0 : }
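
/*
 * Application-side sketch (example_recover_if_failed is hypothetical): once a
 * controller is failed, queued and new I/O complete with errors, and the
 * usual recovery path is a full controller reset.
 */
static void
example_recover_if_failed(struct spdk_nvme_ctrlr *ctrlr)
{
        if (spdk_nvme_ctrlr_is_failed(ctrlr) && spdk_nvme_ctrlr_reset(ctrlr) != 0) {
                fprintf(stderr, "controller reset failed\n");
        }
}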
    1098             : 
    1099             : static void
    1100          39 : nvme_ctrlr_shutdown_set_cc_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1101             : {
    1102          39 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1103          39 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    1104             : 
    1105          39 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1106           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write CC.SHN\n");
    1107           0 :                 ctx->shutdown_complete = true;
    1108           0 :                 return;
    1109             :         }
    1110             : 
    1111          39 :         if (ctrlr->opts.no_shn_notification) {
    1112           0 :                 ctx->shutdown_complete = true;
    1113           0 :                 return;
    1114             :         }
    1115             : 
    1116             :         /*
    1117             :          * The NVMe specification defines RTD3E as the time from
    1118             :          *  setting SHN = 1 until the controller sets SHST = 10b.
    1119             :          * If the device doesn't report RTD3 entry latency, or if it
    1120             :          *  reports an RTD3 entry latency of less than 10 seconds, pick
    1121             :          *  10 seconds as a reasonable amount of time to
    1122             :          *  wait before proceeding.
    1123             :          */
    1124          39 :         NVME_CTRLR_DEBUGLOG(ctrlr, "RTD3E = %" PRIu32 " us\n", ctrlr->cdata.rtd3e);
    1125          39 :         ctx->shutdown_timeout_ms = SPDK_CEIL_DIV(ctrlr->cdata.rtd3e, 1000);
    1126          39 :         ctx->shutdown_timeout_ms = spdk_max(ctx->shutdown_timeout_ms, 10000);
    1127          39 :         NVME_CTRLR_DEBUGLOG(ctrlr, "shutdown timeout = %" PRIu32 " ms\n", ctx->shutdown_timeout_ms);
    1128             : 
    1129          39 :         ctx->shutdown_start_tsc = spdk_get_ticks();
    1130          39 :         ctx->state = NVME_CTRLR_DETACH_CHECK_CSTS;
    1131          39 : }
    1132             : 
    1133             : static void
    1134          39 : nvme_ctrlr_shutdown_get_cc_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1135             : {
    1136          39 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1137          39 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    1138             :         union spdk_nvme_cc_register cc;
    1139             :         int rc;
    1140             : 
    1141          39 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1142           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    1143           0 :                 ctx->shutdown_complete = true;
    1144           0 :                 return;
    1145             :         }
    1146             : 
    1147          39 :         assert(value <= UINT32_MAX);
    1148          39 :         cc.raw = (uint32_t)value;
    1149             : 
    1150          39 :         if (ctrlr->opts.no_shn_notification) {
    1151           0 :                 NVME_CTRLR_INFOLOG(ctrlr, "Disable SSD without shutdown notification\n");
    1152           0 :                 if (cc.bits.en == 0) {
    1153           0 :                         ctx->shutdown_complete = true;
    1154           0 :                         return;
    1155             :                 }
    1156             : 
    1157           0 :                 cc.bits.en = 0;
    1158           0 :         } else {
    1159          39 :                 cc.bits.shn = SPDK_NVME_SHN_NORMAL;
    1160             :         }
    1161             : 
    1162          39 :         rc = nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_shutdown_set_cc_done, ctx);
    1163          39 :         if (rc != 0) {
    1164           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write CC.SHN\n");
    1165           0 :                 ctx->shutdown_complete = true;
    1166           0 :         }
    1167          39 : }
    1168             : 
    1169             : static void
    1170          47 : nvme_ctrlr_shutdown_async(struct spdk_nvme_ctrlr *ctrlr,
    1171             :                           struct nvme_ctrlr_detach_ctx *ctx)
    1172             : {
    1173             :         int rc;
    1174             : 
    1175          47 :         if (ctrlr->is_removed) {
    1176           0 :                 ctx->shutdown_complete = true;
    1177           0 :                 return;
    1178             :         }
    1179             : 
    1180          47 :         if (ctrlr->adminq == NULL ||
    1181          40 :             ctrlr->adminq->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
    1182           8 :                 NVME_CTRLR_INFOLOG(ctrlr, "Adminq is not connected.\n");
    1183           8 :                 ctx->shutdown_complete = true;
    1184           8 :                 return;
    1185             :         }
    1186             : 
    1187          39 :         ctx->state = NVME_CTRLR_DETACH_SET_CC;
    1188          39 :         rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_shutdown_get_cc_done, ctx);
    1189          39 :         if (rc != 0) {
    1190           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    1191           0 :                 ctx->shutdown_complete = true;
    1192           0 :         }
    1193          47 : }
    1194             : 
    1195             : static void
    1196          39 : nvme_ctrlr_shutdown_get_csts_done(void *_ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1197             : {
    1198          39 :         struct nvme_ctrlr_detach_ctx *ctx = _ctx;
    1199             : 
    1200          39 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1201           0 :                 NVME_CTRLR_ERRLOG(ctx->ctrlr, "Failed to read the CSTS register\n");
    1202           0 :                 ctx->shutdown_complete = true;
    1203           0 :                 return;
    1204             :         }
    1205             : 
    1206          39 :         assert(value <= UINT32_MAX);
    1207          39 :         ctx->csts.raw = (uint32_t)value;
    1208          39 :         ctx->state = NVME_CTRLR_DETACH_GET_CSTS_DONE;
    1209          39 : }
    1210             : 
    1211             : static int
    1212          78 : nvme_ctrlr_shutdown_poll_async(struct spdk_nvme_ctrlr *ctrlr,
    1213             :                                struct nvme_ctrlr_detach_ctx *ctx)
    1214             : {
    1215             :         union spdk_nvme_csts_register   csts;
    1216             :         uint32_t                        ms_waited;
    1217             : 
    1218          78 :         switch (ctx->state) {
    1219             :         case NVME_CTRLR_DETACH_SET_CC:
    1220             :         case NVME_CTRLR_DETACH_GET_CSTS:
    1221             :                 /* We're still waiting for the register operation to complete */
    1222           0 :                 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    1223           0 :                 return -EAGAIN;
    1224             : 
    1225             :         case NVME_CTRLR_DETACH_CHECK_CSTS:
    1226          39 :                 ctx->state = NVME_CTRLR_DETACH_GET_CSTS;
    1227          39 :                 if (nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_shutdown_get_csts_done, ctx)) {
    1228           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    1229           0 :                         return -EIO;
    1230             :                 }
    1231          39 :                 return -EAGAIN;
    1232             : 
    1233             :         case NVME_CTRLR_DETACH_GET_CSTS_DONE:
    1234          39 :                 ctx->state = NVME_CTRLR_DETACH_CHECK_CSTS;
    1235          39 :                 break;
    1236             : 
    1237             :         default:
    1238           0 :                 assert(0 && "Should never happen");
    1239             :                 return -EINVAL;
    1240             :         }
    1241             : 
    1242          39 :         ms_waited = (spdk_get_ticks() - ctx->shutdown_start_tsc) * 1000 / spdk_get_ticks_hz();
    1243          39 :         csts.raw = ctx->csts.raw;
    1244             : 
    1245          39 :         if (csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
    1246          39 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "shutdown complete in %u milliseconds\n", ms_waited);
    1247          39 :                 return 0;
    1248             :         }
    1249             : 
    1250           0 :         if (ms_waited < ctx->shutdown_timeout_ms) {
    1251           0 :                 return -EAGAIN;
    1252             :         }
    1253             : 
    1254           0 :         NVME_CTRLR_ERRLOG(ctrlr, "did not shutdown within %u milliseconds\n",
    1255             :                           ctx->shutdown_timeout_ms);
    1256           0 :         if (ctrlr->quirks & NVME_QUIRK_SHST_COMPLETE) {
    1257           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "likely due to shutdown handling in the VMware emulated NVMe SSD\n");
    1258           0 :         }
    1259             : 
    1260           0 :         return 0;
    1261          78 : }
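
/*
 * The shutdown state machine above is normally driven through the public
 * detach path. A minimal polling sketch (example_detach is hypothetical):
 */
static void
example_detach(struct spdk_nvme_ctrlr *ctrlr)
{
        struct spdk_nvme_detach_ctx *detach_ctx = NULL;

        if (spdk_nvme_detach_async(ctrlr, &detach_ctx) != 0) {
                return;
        }

        /* Poll until the controller reports shutdown complete (or times out). */
        while (detach_ctx != NULL && spdk_nvme_detach_poll_async(detach_ctx) == -EAGAIN) {
                ;
        }
}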
    1262             : 
    1263             : static inline uint64_t
    1264         509 : nvme_ctrlr_get_ready_timeout(struct spdk_nvme_ctrlr *ctrlr)
    1265             : {
    1266         509 :         return ctrlr->cap.bits.to * 500;
    1267             : }
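
/*
 * Worked example: CAP.TO is in 500 ms units, so CAP.TO = 20 yields
 * 20 * 500 = 10,000 ms as the worst-case wait for CSTS.RDY transitions.
 */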
    1268             : 
    1269             : static void
    1270          14 : nvme_ctrlr_set_cc_en_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    1271             : {
    1272          14 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    1273             : 
    1274          14 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1275           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to set the CC register\n");
    1276           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1277           0 :                 return;
    1278             :         }
    1279             : 
    1280          28 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    1281          14 :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    1282          14 : }
    1283             : 
    1284             : static int
    1285          21 : nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
    1286             : {
    1287             :         union spdk_nvme_cc_register     cc;
    1288             :         int                             rc;
    1289             : 
    1290          21 :         rc = nvme_transport_ctrlr_enable(ctrlr);
    1291          21 :         if (rc != 0) {
    1292           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "transport ctrlr_enable failed\n");
    1293           0 :                 return rc;
    1294             :         }
    1295             : 
    1296          21 :         cc.raw = ctrlr->process_init_cc.raw;
    1297          21 :         if (cc.bits.en != 0) {
    1298           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "called with CC.EN = 1\n");
    1299           0 :                 return -EINVAL;
    1300             :         }
    1301             : 
    1302          21 :         cc.bits.en = 1;
    1303          21 :         cc.bits.css = 0;
    1304          21 :         cc.bits.shn = 0;
    1305          21 :         cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
    1306          21 :         cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
    1307             : 
    1308             :         /* Page size is 2 ^ (12 + mps). */
    1309          21 :         cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;
    1310             : 
    1311             :         /*
    1312             :          * Since NVMe 1.0, a controller should have at least one bit set in CAP.CSS.
    1313             :          * A controller that does not have any bit set in CAP.CSS is not spec compliant.
    1314             :          * Try to support such a controller regardless.
    1315             :          */
    1316          21 :         if (ctrlr->cap.bits.css == 0) {
    1317          21 :                 NVME_CTRLR_INFOLOG(ctrlr, "Drive reports no command sets supported. Assuming NVM is supported.\n");
    1318          21 :                 ctrlr->cap.bits.css = SPDK_NVME_CAP_CSS_NVM;
    1319          21 :         }
    1320             : 
    1321             :         /*
    1322             :          * If the user did not explicitly request a command set, or supplied a value larger than
    1323             :          * what can be saved in CC.CSS, use the most reasonable default.
    1324             :          */
    1325          21 :         if (ctrlr->opts.command_set >= CHAR_BIT) {
    1326           0 :                 if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS) {
    1327           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_IOCS;
    1328           0 :                 } else if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_NVM) {
    1329           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1330           0 :                 } else if (ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_NOIO) {
    1331             :                         /* Technically we should respond with CC_CSS_NOIO in
    1332             :                          * this case, but we use NVM instead to work around
    1333             :                          * buggy targets and to match Linux driver behavior.
    1334             :                          */
    1335           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1336           0 :                 } else {
    1337             :                         /* Invalid supported bits detected, falling back to NVM. */
    1338           0 :                         ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1339             :                 }
    1340           0 :         }
    1341             : 
    1342             :         /* Verify that the selected command set is supported by the controller. */
    1343          21 :         if (!(ctrlr->cap.bits.css & (1u << ctrlr->opts.command_set))) {
    1344           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Requested I/O command set %u but supported mask is 0x%x\n",
    1345             :                                     ctrlr->opts.command_set, ctrlr->cap.bits.css);
    1346           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Falling back to NVM. Assuming NVM is supported.\n");
    1347           0 :                 ctrlr->opts.command_set = SPDK_NVME_CC_CSS_NVM;
    1348           0 :         }
    1349             : 
    1350          21 :         cc.bits.css = ctrlr->opts.command_set;
    1351             : 
    1352          21 :         switch (ctrlr->opts.arb_mechanism) {
    1353             :         case SPDK_NVME_CC_AMS_RR:
    1354          10 :                 break;
    1355             :         case SPDK_NVME_CC_AMS_WRR:
    1356           4 :                 if (SPDK_NVME_CAP_AMS_WRR & ctrlr->cap.bits.ams) {
    1357           2 :                         break;
    1358             :                 }
    1359           2 :                 return -EINVAL;
    1360             :         case SPDK_NVME_CC_AMS_VS:
    1361           4 :                 if (SPDK_NVME_CAP_AMS_VS & ctrlr->cap.bits.ams) {
    1362           2 :                         break;
    1363             :                 }
    1364           2 :                 return -EINVAL;
    1365             :         default:
    1366           3 :                 return -EINVAL;
    1367             :         }
    1368             : 
    1369          14 :         cc.bits.ams = ctrlr->opts.arb_mechanism;
    1370          14 :         ctrlr->process_init_cc.raw = cc.raw;
    1371             : 
    1372          14 :         if (nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_set_cc_en_done, ctrlr)) {
    1373           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_cc() failed\n");
    1374           0 :                 return -EIO;
    1375             :         }
    1376             : 
    1377          14 :         return 0;
    1378          21 : }
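
/*
 * Worked example of the CC.MPS math above: the memory page size is
 * 2^(12 + MPS) bytes, so a 4 KiB page gives spdk_u32log2(4096) - 12 = 0,
 * and an 8 KiB page gives 13 - 12 = 1. (example_cc_mps is hypothetical.)
 */
static inline uint32_t
example_cc_mps(uint32_t page_size)
{
        return spdk_u32log2(page_size) - 12;
}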
    1379             : 
    1380             : static const char *
    1381           1 : nvme_ctrlr_state_string(enum nvme_ctrlr_state state)
    1382             : {
    1383           1 :         switch (state) {
    1384             :         case NVME_CTRLR_STATE_INIT_DELAY:
    1385           0 :                 return "delay init";
    1386             :         case NVME_CTRLR_STATE_CONNECT_ADMINQ:
    1387           0 :                 return "connect adminq";
    1388             :         case NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ:
    1389           0 :                 return "wait for connect adminq";
    1390             :         case NVME_CTRLR_STATE_READ_VS:
    1391           0 :                 return "read vs";
    1392             :         case NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS:
    1393           0 :                 return "read vs wait for vs";
    1394             :         case NVME_CTRLR_STATE_READ_CAP:
    1395           0 :                 return "read cap";
    1396             :         case NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP:
    1397           0 :                 return "read cap wait for cap";
    1398             :         case NVME_CTRLR_STATE_CHECK_EN:
    1399           0 :                 return "check en";
    1400             :         case NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC:
    1401           0 :                 return "check en wait for cc";
    1402             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
    1403           0 :                 return "disable and wait for CSTS.RDY = 1";
    1404             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    1405           0 :                 return "disable and wait for CSTS.RDY = 1 reg";
    1406             :         case NVME_CTRLR_STATE_SET_EN_0:
    1407           0 :                 return "set CC.EN = 0";
    1408             :         case NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC:
    1409           0 :                 return "set CC.EN = 0 wait for cc";
    1410             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
    1411           0 :                 return "disable and wait for CSTS.RDY = 0";
    1412             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS:
    1413           0 :                 return "disable and wait for CSTS.RDY = 0 reg";
    1414             :         case NVME_CTRLR_STATE_DISABLED:
    1415           0 :                 return "controller is disabled";
    1416             :         case NVME_CTRLR_STATE_ENABLE:
    1417           0 :                 return "enable controller by writing CC.EN = 1";
    1418             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC:
    1419           0 :                 return "enable controller by writing CC.EN = 1 reg";
    1420             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
    1421           0 :                 return "wait for CSTS.RDY = 1";
    1422             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    1423           0 :                 return "wait for CSTS.RDY = 1 reg";
    1424             :         case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
    1425           0 :                 return "reset admin queue";
    1426             :         case NVME_CTRLR_STATE_IDENTIFY:
    1427           0 :                 return "identify controller";
    1428             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
    1429           0 :                 return "wait for identify controller";
    1430             :         case NVME_CTRLR_STATE_CONFIGURE_AER:
    1431           0 :                 return "configure AER";
    1432             :         case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
    1433           0 :                 return "wait for configure aer";
    1434             :         case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
    1435           0 :                 return "set keep alive timeout";
    1436             :         case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
    1437           0 :                 return "wait for set keep alive timeout";
    1438             :         case NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC:
    1439           0 :                 return "identify controller iocs specific";
    1440             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC:
    1441           0 :                 return "wait for identify controller iocs specific";
    1442             :         case NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG:
    1443           0 :                 return "get zns cmd and effects log page";
    1444             :         case NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG:
    1445           0 :                 return "wait for get zns cmd and effects log page";
    1446             :         case NVME_CTRLR_STATE_SET_NUM_QUEUES:
    1447           0 :                 return "set number of queues";
    1448             :         case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
    1449           0 :                 return "wait for set number of queues";
    1450             :         case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
    1451           0 :                 return "identify active ns";
    1452             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
    1453           0 :                 return "wait for identify active ns";
    1454             :         case NVME_CTRLR_STATE_IDENTIFY_NS:
    1455           0 :                 return "identify ns";
    1456             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
    1457           0 :                 return "wait for identify ns";
    1458             :         case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
    1459           0 :                 return "identify namespace id descriptors";
    1460             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
    1461           0 :                 return "wait for identify namespace id descriptors";
    1462             :         case NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC:
    1463           0 :                 return "identify ns iocs specific";
    1464             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC:
    1465           0 :                 return "wait for identify ns iocs specific";
    1466             :         case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
    1467           0 :                 return "set supported log pages";
    1468             :         case NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES:
    1469           0 :                 return "set supported INTEL log pages";
    1470             :         case NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES:
    1471           0 :                 return "wait for supported INTEL log pages";
    1472             :         case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
    1473           0 :                 return "set supported features";
    1474             :         case NVME_CTRLR_STATE_SET_HOST_FEATURE:
    1475           0 :                 return "set host behavior support feature";
    1476             :         case NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE:
    1477           0 :                 return "wait for set host behavior support feature";
    1478             :         case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
    1479           0 :                 return "set doorbell buffer config";
    1480             :         case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
    1481           0 :                 return "wait for doorbell buffer config";
    1482             :         case NVME_CTRLR_STATE_SET_HOST_ID:
    1483           0 :                 return "set host ID";
    1484             :         case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
    1485           0 :                 return "wait for set host ID";
    1486             :         case NVME_CTRLR_STATE_TRANSPORT_READY:
    1487           0 :                 return "transport ready";
    1488             :         case NVME_CTRLR_STATE_READY:
    1489           0 :                 return "ready";
    1490             :         case NVME_CTRLR_STATE_ERROR:
    1491           1 :                 return "error";
    1492             :         case NVME_CTRLR_STATE_DISCONNECTED:
    1493           0 :                 return "disconnected";
    1494             :         }
    1495           0 :         return "unknown";
    1496           1 : }
    1497             : 
    1498             : static void
    1499         732 : _nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1500             :                       uint64_t timeout_in_ms, bool quiet)
    1501             : {
    1502             :         uint64_t ticks_per_ms, timeout_in_ticks, now_ticks;
    1503             : 
    1504         732 :         ctrlr->state = state;
    1505         732 :         if (timeout_in_ms == NVME_TIMEOUT_KEEP_EXISTING) {
    1506          33 :                 if (!quiet) {
    1507           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (keeping existing timeout)\n",
    1508             :                                             nvme_ctrlr_state_string(ctrlr->state));
    1509           0 :                 }
    1510          33 :                 return;
    1511             :         }
    1512             : 
    1513         699 :         if (timeout_in_ms == NVME_TIMEOUT_INFINITE) {
    1514         697 :                 goto inf;
    1515             :         }
    1516             : 
    1517           2 :         ticks_per_ms = spdk_get_ticks_hz() / 1000;
    1518           2 :         if (timeout_in_ms > UINT64_MAX / ticks_per_ms) {
    1519           0 :                 NVME_CTRLR_ERRLOG(ctrlr,
    1520             :                                   "Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
    1521           0 :                 goto inf;
    1522             :         }
    1523             : 
    1524           2 :         now_ticks = spdk_get_ticks();
    1525           2 :         timeout_in_ticks = timeout_in_ms * ticks_per_ms;
    1526           2 :         if (timeout_in_ticks > UINT64_MAX - now_ticks) {
    1527           1 :                 NVME_CTRLR_ERRLOG(ctrlr,
    1528             :                                   "Specified timeout would cause integer overflow. Defaulting to no timeout.\n");
    1529           1 :                 goto inf;
    1530             :         }
    1531             : 
    1532           1 :         ctrlr->state_timeout_tsc = timeout_in_ticks + now_ticks;
    1533           1 :         if (!quiet) {
    1534           1 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (timeout %" PRIu64 " ms)\n",
    1535             :                                     nvme_ctrlr_state_string(ctrlr->state), timeout_in_ms);
    1536           1 :         }
    1537           1 :         return;
    1538             : inf:
    1539         698 :         if (!quiet) {
    1540         698 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "setting state to %s (no timeout)\n",
    1541             :                                     nvme_ctrlr_state_string(ctrlr->state));
    1542         698 :         }
    1543         698 :         ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
    1544         732 : }
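
/*
 * Worked example of the deadline math above: with a 2.4 GHz timestamp
 * counter, ticks_per_ms = 2,400,000,000 / 1000 = 2,400,000, so a 500 ms
 * state timeout adds 500 * 2,400,000 = 1,200,000,000 ticks to "now".
 */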
    1545             : 
    1546             : static void
    1547         699 : nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1548             :                      uint64_t timeout_in_ms)
    1549             : {
    1550         699 :         _nvme_ctrlr_set_state(ctrlr, state, timeout_in_ms, false);
    1551         699 : }
    1552             : 
    1553             : static void
    1554          33 : nvme_ctrlr_set_state_quiet(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
    1555             :                            uint64_t timeout_in_ms)
    1556             : {
    1557          33 :         _nvme_ctrlr_set_state(ctrlr, state, timeout_in_ms, true);
    1558          33 : }
    1559             : 
    1560             : static void
    1561          48 : nvme_ctrlr_free_zns_specific_data(struct spdk_nvme_ctrlr *ctrlr)
    1562             : {
    1563          48 :         spdk_free(ctrlr->cdata_zns);
    1564          48 :         ctrlr->cdata_zns = NULL;
    1565          48 : }
    1566             : 
    1567             : static void
    1568          48 : nvme_ctrlr_free_iocs_specific_data(struct spdk_nvme_ctrlr *ctrlr)
    1569             : {
    1570          48 :         nvme_ctrlr_free_zns_specific_data(ctrlr);
    1571          48 : }
    1572             : 
    1573             : static void
    1574          49 : nvme_ctrlr_free_doorbell_buffer(struct spdk_nvme_ctrlr *ctrlr)
    1575             : {
    1576          49 :         if (ctrlr->shadow_doorbell) {
    1577           1 :                 spdk_free(ctrlr->shadow_doorbell);
    1578           1 :                 ctrlr->shadow_doorbell = NULL;
    1579           1 :         }
    1580             : 
    1581          49 :         if (ctrlr->eventidx) {
    1582           1 :                 spdk_free(ctrlr->eventidx);
    1583           1 :                 ctrlr->eventidx = NULL;
    1584           1 :         }
    1585          49 : }
    1586             : 
    1587             : static void
    1588           1 : nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
    1589             : {
    1590           1 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    1591             : 
    1592           1 :         if (spdk_nvme_cpl_is_error(cpl)) {
    1593           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "Doorbell buffer config failed\n");
    1594           0 :         } else {
    1595           1 :                 NVME_CTRLR_INFOLOG(ctrlr, "Doorbell buffer config enabled\n");
    1596             :         }
    1597           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1598           1 :                              ctrlr->opts.admin_timeout_ms);
    1599           1 : }
    1600             : 
    1601             : static int
    1602          15 : nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
    1603             : {
    1604          15 :         int rc = 0;
    1605             :         uint64_t prp1, prp2, len;
    1606             : 
    1607          15 :         if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
    1608          28 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1609          14 :                                      ctrlr->opts.admin_timeout_ms);
    1610          14 :                 return 0;
    1611             :         }
    1612             : 
    1613           1 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    1614           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
    1615           0 :                                      ctrlr->opts.admin_timeout_ms);
    1616           0 :                 return 0;
    1617             :         }
    1618             : 
    1619             :         /* Only one page is used for each doorbell buffer. */
    1620           1 :         ctrlr->shadow_doorbell = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
    1621             :                                               NULL, SPDK_ENV_LCORE_ID_ANY,
    1622             :                                               SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
    1623           1 :         if (ctrlr->shadow_doorbell == NULL) {
    1624           0 :                 rc = -ENOMEM;
    1625           0 :                 goto error;
    1626             :         }
    1627             : 
    1628           1 :         len = ctrlr->page_size;
    1629           1 :         prp1 = spdk_vtophys(ctrlr->shadow_doorbell, &len);
    1630           1 :         if (prp1 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
    1631           0 :                 rc = -EFAULT;
    1632           0 :                 goto error;
    1633             :         }
    1634             : 
    1635           1 :         ctrlr->eventidx = spdk_zmalloc(ctrlr->page_size, ctrlr->page_size,
    1636             :                                        NULL, SPDK_ENV_LCORE_ID_ANY,
    1637             :                                        SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
    1638           1 :         if (ctrlr->eventidx == NULL) {
    1639           0 :                 rc = -ENOMEM;
    1640           0 :                 goto error;
    1641             :         }
    1642             : 
    1643           1 :         len = ctrlr->page_size;
    1644           1 :         prp2 = spdk_vtophys(ctrlr->eventidx, &len);
    1645           1 :         if (prp2 == SPDK_VTOPHYS_ERROR || len != ctrlr->page_size) {
    1646           0 :                 rc = -EFAULT;
    1647           0 :                 goto error;
    1648             :         }
    1649             : 
    1650           2 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
    1651           1 :                              ctrlr->opts.admin_timeout_ms);
    1652             : 
    1653           2 :         rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
    1654           1 :                         nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
    1655           1 :         if (rc != 0) {
    1656           0 :                 goto error;
    1657             :         }
    1658             : 
    1659           1 :         return 0;
    1660             : 
    1661             : error:
    1662           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    1663           0 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    1664           0 :         return rc;
    1665          15 : }
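
/*
 * The PRPs above come from spdk_vtophys(), which also confirms the buffer is
 * physically contiguous for the requested length. A minimal sketch
 * (example_dma_addr is hypothetical):
 */
static uint64_t
example_dma_addr(void *buf, uint64_t expected_len)
{
        uint64_t len = expected_len;
        uint64_t phys = spdk_vtophys(buf, &len);

        if (phys == SPDK_VTOPHYS_ERROR || len != expected_len) {
                return SPDK_VTOPHYS_ERROR;
        }
        return phys;
}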
    1666             : 
    1667             : void
    1668          48 : nvme_ctrlr_abort_queued_aborts(struct spdk_nvme_ctrlr *ctrlr)
    1669             : {
    1670             :         struct nvme_request     *req, *tmp;
    1671          48 :         struct spdk_nvme_cpl    cpl = {};
    1672             : 
    1673          48 :         cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
    1674          48 :         cpl.status.sct = SPDK_NVME_SCT_GENERIC;
    1675             : 
    1676          48 :         STAILQ_FOREACH_SAFE(req, &ctrlr->queued_aborts, stailq, tmp) {
    1677           0 :                 STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
    1678           0 :                 ctrlr->outstanding_aborts++;
    1679             : 
    1680           0 :                 nvme_complete_request(req->cb_fn, req->cb_arg, req->qpair, req, &cpl);
    1681           0 :         }
    1682          48 : }
    1683             : 
    1684             : static int
    1685           2 : nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
    1686             : {
    1687           2 :         if (ctrlr->is_resetting || ctrlr->is_removed) {
    1688             :                 /*
    1689             :                  * Controller is already resetting or has been removed. Return
    1690             :                  *  immediately since there is no need to kick off another
    1691             :                  *  reset in these cases.
    1692             :                  */
    1693           1 :                 return ctrlr->is_resetting ? -EBUSY : -ENXIO;
    1694             :         }
    1695             : 
    1696           1 :         ctrlr->is_resetting = true;
    1697           1 :         ctrlr->is_failed = false;
    1698           1 :         ctrlr->is_disconnecting = true;
    1699           1 :         ctrlr->prepare_for_reset = true;
    1700             : 
    1701           1 :         NVME_CTRLR_NOTICELOG(ctrlr, "resetting controller\n");
    1702             : 
    1703             :         /* Disable keep-alive; it'll be re-enabled as part of the init process. */
    1704           1 :         ctrlr->keep_alive_interval_ticks = 0;
    1705             : 
    1706             :         /* Abort all of the queued abort requests */
    1707           1 :         nvme_ctrlr_abort_queued_aborts(ctrlr);
    1708             : 
    1709           1 :         nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
    1710             : 
    1711           1 :         ctrlr->adminq->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1712           1 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    1713             : 
    1714           1 :         return 0;
    1715           2 : }
    1716             : 
    1717             : static void
    1718           1 : nvme_ctrlr_disconnect_done(struct spdk_nvme_ctrlr *ctrlr)
    1719             : {
    1720           1 :         assert(ctrlr->is_failed == false);
    1721           1 :         ctrlr->is_disconnecting = false;
    1722             : 
    1723             :         /* Doorbell buffer config is invalid during reset */
    1724           1 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    1725             : 
    1726             :         /* I/O Command Set Specific Identify Controller data is invalidated during reset */
    1727           1 :         nvme_ctrlr_free_iocs_specific_data(ctrlr);
    1728             : 
    1729           1 :         spdk_bit_array_free(&ctrlr->free_io_qids);
    1730             : 
    1731             :         /* Set the state back to DISCONNECTED to cause a full hardware reset. */
    1732           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISCONNECTED, NVME_TIMEOUT_INFINITE);
    1733           1 : }
    1734             : 
    1735             : int
    1736           0 : spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
    1737             : {
    1738             :         int rc;
    1739             : 
    1740           0 :         nvme_ctrlr_lock(ctrlr);
    1741           0 :         rc = nvme_ctrlr_disconnect(ctrlr);
    1742           0 :         nvme_ctrlr_unlock(ctrlr);
    1743             : 
    1744           0 :         return rc;
    1745             : }
    1746             : 
    1747             : void
    1748           1 : spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
    1749             : {
    1750           1 :         nvme_ctrlr_lock(ctrlr);
    1751             : 
    1752           1 :         ctrlr->prepare_for_reset = false;
    1753             : 
    1754             :         /* Set the state back to INIT to cause a full hardware reset. */
    1755           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    1756             : 
    1757             :         /* Return without releasing ctrlr_lock. ctrlr_lock will be released when
    1758             :          * spdk_nvme_ctrlr_reconnect_poll_async() returns 0.
    1759             :          */
    1760           1 : }
    1761             : 
    1762             : int
    1763           0 : nvme_ctrlr_reinitialize_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
    1764             : {
    1765             :         bool async;
    1766             :         int rc;
    1767             : 
    1768           0 :         if (nvme_ctrlr_get_current_process(ctrlr) != qpair->active_proc ||
    1769           0 :             spdk_nvme_ctrlr_is_fabrics(ctrlr) || nvme_qpair_is_admin_queue(qpair)) {
    1770           0 :                 assert(false);
    1771             :                 return -EINVAL;
    1772             :         }
    1773             : 
    1774             :         /* Force a synchronous connect. */
    1775           0 :         async = qpair->async;
    1776           0 :         qpair->async = false;
    1777           0 :         rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
    1778           0 :         qpair->async = async;
    1779             : 
    1780           0 :         if (rc != 0) {
    1781           0 :                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1782           0 :         }
    1783             : 
    1784           0 :         return rc;
    1785             : }
    1786             : 
    1787             : /**
    1788             :  * This function will be called when the controller is being reinitialized.
    1789             :  * Note: the ctrlr_lock must be held when calling this function.
    1790             :  */
    1791             : int
    1792          25 : spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
    1793             : {
    1794             :         struct spdk_nvme_ns *ns, *tmp_ns;
    1795             :         struct spdk_nvme_qpair  *qpair;
    1796          25 :         int rc = 0, rc_tmp = 0;
    1797             : 
    1798          25 :         if (nvme_ctrlr_process_init(ctrlr) != 0) {
    1799           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "controller reinitialization failed\n");
    1800           0 :                 rc = -1;
    1801           0 :         }
    1802          25 :         if (ctrlr->state != NVME_CTRLR_STATE_READY && rc != -1) {
    1803          24 :                 return -EAGAIN;
    1804             :         }
    1805             : 
    1806             :         /*
    1807             :          * For non-fabrics controllers, the memory locations of the transport qpair
    1808             :          * don't change when the controller is reset. They simply need to be
    1809             :          * re-enabled with admin commands to the controller. For fabric
    1810             :          * controllers we need to disconnect and reconnect the qpair on its
    1811             :          * own thread outside of the context of the reset.
    1812             :          */
    1813           1 :         if (rc == 0 && !spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
    1814             :                 /* Reinitialize qpairs */
    1815           1 :                 TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
    1816             :                         /* Always clear the qid bit here, even for a foreign qpair. We need
    1817             :                          * to make sure another process doesn't get the chance to grab that
    1818             :                          * qid.
    1819             :                          */
    1820           0 :                         assert(spdk_bit_array_get(ctrlr->free_io_qids, qpair->id));
    1821           0 :                         spdk_bit_array_clear(ctrlr->free_io_qids, qpair->id);
    1822           0 :                         if (nvme_ctrlr_get_current_process(ctrlr) != qpair->active_proc) {
    1823             :                                 /*
    1824             :                                  * We cannot reinitialize a foreign qpair. The qpair's owning
    1825             :                                  * process will take care of it. Set failure reason to FAILURE_RESET
    1826             :                                  * to ensure that happens.
    1827             :                                  */
    1828           0 :                                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_RESET;
    1829           0 :                                 continue;
    1830             :                         }
    1831           0 :                         rc_tmp = nvme_ctrlr_reinitialize_io_qpair(ctrlr, qpair);
    1832           0 :                         if (rc_tmp != 0) {
    1833           0 :                                 rc = rc_tmp;
    1834           0 :                         }
    1835           0 :                 }
    1836           1 :         }
    1837             : 
    1838             :         /*
    1839             :          * Take this opportunity to remove inactive namespaces. During a reset namespace
    1840             :          * handles can be invalidated.
    1841             :          */
    1842           5 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    1843           4 :                 if (!ns->active) {
    1844           1 :                         RB_REMOVE(nvme_ns_tree, &ctrlr->ns, ns);
    1845           1 :                         spdk_free(ns);
    1846           1 :                 }
    1847           4 :         }
    1848             : 
    1849           1 :         if (rc) {
    1850           0 :                 nvme_ctrlr_fail(ctrlr, false);
    1851           0 :         }
    1852           1 :         ctrlr->is_resetting = false;
    1853             : 
    1854           1 :         nvme_ctrlr_unlock(ctrlr);
    1855             : 
    1856           1 :         if (!ctrlr->cdata.oaes.ns_attribute_notices) {
    1857             :                 /*
    1858             :                  * If controller doesn't support ns_attribute_notices and
    1859             :                  * namespace attributes change (e.g. number of namespaces)
    1860             :                  * we need to update system handling device reset.
    1861             :                  */
    1862           1 :                 nvme_io_msg_ctrlr_update(ctrlr);
    1863           1 :         }
    1864             : 
    1865           1 :         return rc;
    1866          25 : }
    1867             : 
    1868             : /*
    1869             :  * For PCIe transport, spdk_nvme_ctrlr_disconnect() will do a Controller Level Reset
    1870             :  * (Change CC.EN from 1 to 0) as an operation to disconnect the admin qpair.
    1871             :  * The following two functions are added to do a Controller Level Reset. They have
    1872             :  * to be called under the nvme controller's lock.
    1873             :  */
    1874             : void
    1875           1 : nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr)
    1876             : {
    1877           1 :         assert(ctrlr->is_disconnecting == true);
    1878             : 
    1879           1 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN, NVME_TIMEOUT_INFINITE);
    1880           1 : }
    1881             : 
    1882             : int
    1883           2 : nvme_ctrlr_disable_poll(struct spdk_nvme_ctrlr *ctrlr)
    1884             : {
    1885           2 :         int rc = 0;
    1886             : 
    1887           2 :         if (nvme_ctrlr_process_init(ctrlr) != 0) {
    1888           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "failed to disable controller\n");
    1889           0 :                 rc = -1;
    1890           0 :         }
    1891             : 
    1892           2 :         if (ctrlr->state != NVME_CTRLR_STATE_DISABLED && rc != -1) {
    1893           1 :                 return -EAGAIN;
    1894             :         }
    1895             : 
    1896           1 :         return rc;
    1897           2 : }
    1898             : 
    1899             : static void
    1900           1 : nvme_ctrlr_fail_io_qpairs(struct spdk_nvme_ctrlr *ctrlr)
    1901             : {
    1902             :         struct spdk_nvme_qpair  *qpair;
    1903             : 
    1904           1 :         TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
    1905           0 :                 qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
    1906           0 :         }
    1907           1 : }
    1908             : 
    1909             : int
    1910           2 : spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
    1911             : {
    1912             :         int rc;
    1913             : 
    1914           2 :         nvme_ctrlr_lock(ctrlr);
    1915             : 
    1916           2 :         rc = nvme_ctrlr_disconnect(ctrlr);
    1917           2 :         if (rc == 0) {
    1918           1 :                 nvme_ctrlr_fail_io_qpairs(ctrlr);
    1919           1 :         }
    1920             : 
    1921           2 :         nvme_ctrlr_unlock(ctrlr);
    1922             : 
    1923           2 :         if (rc != 0) {
    1924           1 :                 if (rc == -EBUSY) {
    1925           1 :                         rc = 0;
    1926           1 :                 }
    1927           1 :                 return rc;
    1928             :         }
    1929             : 
    1930           1 :         while (1) {
    1931           1 :                 rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr);
    1932           1 :                 if (rc == -ENXIO) {
    1933           1 :                         break;
    1934             :                 }
    1935             :         }
    1936             : 
    1937           1 :         spdk_nvme_ctrlr_reconnect_async(ctrlr);
    1938             : 
    1939          25 :         while (true) {
    1940          25 :                 rc = spdk_nvme_ctrlr_reconnect_poll_async(ctrlr);
    1941          25 :                 if (rc != -EAGAIN) {
    1942           1 :                         break;
    1943             :                 }
    1944             :         }
    1945             : 
    1946           1 :         return rc;
    1947           2 : }
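
/*
 * Illustrative sketch: the same reset decomposed into non-blocking steps, as
 * an application poller might drive it. Built from the public primitives used
 * by spdk_nvme_ctrlr_reset() above; the bookkeeping flag is hypothetical.
 */
static int
example_reset_poll(struct spdk_nvme_ctrlr *ctrlr, bool *reconnect_started)
{
	if (!*reconnect_started) {
		/* Drain the admin queue; -ENXIO indicates the disconnect completed. */
		if (spdk_nvme_ctrlr_process_admin_completions(ctrlr) != -ENXIO) {
			return -EAGAIN;
		}
		spdk_nvme_ctrlr_reconnect_async(ctrlr);
		*reconnect_started = true;
	}

	/* Returns -EAGAIN while the reconnect is still in progress. */
	return spdk_nvme_ctrlr_reconnect_poll_async(ctrlr);
}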
    1948             : 
    1949             : int
    1950           0 : spdk_nvme_ctrlr_reset_subsystem(struct spdk_nvme_ctrlr *ctrlr)
    1951             : {
    1952             :         union spdk_nvme_cap_register cap;
    1953           0 :         int rc = 0;
    1954             : 
    1955           0 :         cap = spdk_nvme_ctrlr_get_regs_cap(ctrlr);
    1956           0 :         if (cap.bits.nssrs == 0) {
    1957           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "subsystem reset is not supported\n");
    1958           0 :                 return -ENOTSUP;
    1959             :         }
    1960             : 
    1961           0 :         NVME_CTRLR_NOTICELOG(ctrlr, "resetting subsystem\n");
    1962           0 :         nvme_ctrlr_lock(ctrlr);
    1963           0 :         ctrlr->is_resetting = true;
    1964           0 :         rc = nvme_ctrlr_set_nssr(ctrlr, SPDK_NVME_NSSR_VALUE);
    1965           0 :         ctrlr->is_resetting = false;
    1966             : 
    1967           0 :         nvme_ctrlr_unlock(ctrlr);
    1968             :         /*
    1969             :          * Unlike a ctrlr reset, no further cleanup is done at this point. A subsystem reset will cause
    1970             :          * a hot remove for the PCIe transport, and the hot remove handling does all the necessary ctrlr cleanup.
    1971             :          */
    1972           0 :         return rc;
    1973           0 : }
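
/*
 * Note: SPDK_NVME_NSSR_VALUE is the signature 0x4E564D65 (ASCII "NVMe") that
 * the NVMe specification requires to be written to the NSSR register to
 * trigger an NVM Subsystem Reset.
 */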
    1974             : 
    1975             : int
    1976           4 : spdk_nvme_ctrlr_set_trid(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_transport_id *trid)
    1977             : {
    1978           4 :         int rc = 0;
    1979             : 
    1980           4 :         nvme_ctrlr_lock(ctrlr);
    1981             : 
    1982           4 :         if (ctrlr->is_failed == false) {
    1983           1 :                 rc = -EPERM;
    1984           1 :                 goto out;
    1985             :         }
    1986             : 
    1987           3 :         if (trid->trtype != ctrlr->trid.trtype) {
    1988           1 :                 rc = -EINVAL;
    1989           1 :                 goto out;
    1990             :         }
    1991             : 
    1992           2 :         if (strncmp(trid->subnqn, ctrlr->trid.subnqn, SPDK_NVMF_NQN_MAX_LEN)) {
    1993           1 :                 rc = -EINVAL;
    1994           1 :                 goto out;
    1995             :         }
    1996             : 
    1997           1 :         ctrlr->trid = *trid;
    1998             : 
    1999             : out:
    2000           4 :         nvme_ctrlr_unlock(ctrlr);
    2001           4 :         return rc;
    2002             : }
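
/*
 * Illustrative sketch: failing over a failed controller to an alternate path.
 * spdk_nvme_ctrlr_set_trid() only succeeds once the controller is marked
 * failed, and the new transport ID must keep the same trtype and subnqn; the
 * address below is hypothetical.
 */
static int
example_failover_path(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_nvme_transport_id trid = {};
	int rc;

	rc = spdk_nvme_transport_id_parse(&trid,
					  "trtype:RDMA adrfam:IPv4 traddr:192.168.0.2 "
					  "trsvcid:4420 subnqn:nqn.2016-06.io.spdk:cnode1");
	if (rc != 0) {
		return rc;
	}

	return spdk_nvme_ctrlr_set_trid(ctrlr, &trid);
}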
    2003             : 
    2004             : void
    2005           0 : spdk_nvme_ctrlr_set_remove_cb(struct spdk_nvme_ctrlr *ctrlr,
    2006             :                               spdk_nvme_remove_cb remove_cb, void *remove_ctx)
    2007             : {
    2008           0 :         if (!spdk_process_is_primary()) {
    2009           0 :                 return;
    2010             :         }
    2011             : 
    2012           0 :         nvme_ctrlr_lock(ctrlr);
    2013           0 :         ctrlr->remove_cb = remove_cb;
    2014           0 :         ctrlr->cb_ctx = remove_ctx;
    2015           0 :         nvme_ctrlr_unlock(ctrlr);
    2016           0 : }
    2017             : 
    2018             : int
    2019           0 : spdk_nvme_ctrlr_set_keys(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ctrlr_key_opts *opts)
    2020             : {
    2021           0 :         nvme_ctrlr_lock(ctrlr);
    2022           0 :         if (SPDK_GET_FIELD(opts, dhchap_key, ctrlr->opts.dhchap_key) == NULL &&
    2023           0 :             SPDK_GET_FIELD(opts, dhchap_ctrlr_key, ctrlr->opts.dhchap_ctrlr_key) != NULL) {
    2024           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "DH-HMAC-CHAP controller key requires host key to be set\n");
    2025           0 :                 nvme_ctrlr_unlock(ctrlr);
    2026           0 :                 return -EINVAL;
    2027             :         }
    2028             : 
    2029           0 :         ctrlr->opts.dhchap_key =
    2030           0 :                 SPDK_GET_FIELD(opts, dhchap_key, ctrlr->opts.dhchap_key);
    2031           0 :         ctrlr->opts.dhchap_ctrlr_key =
    2032           0 :                 SPDK_GET_FIELD(opts, dhchap_ctrlr_key, ctrlr->opts.dhchap_ctrlr_key);
    2033           0 :         nvme_ctrlr_unlock(ctrlr);
    2034             : 
    2035           0 :         return 0;
    2036           0 : }
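
/*
 * Illustrative sketch: setting DH-HMAC-CHAP keys with the function above. The
 * opts structure is versioned by its size field (per the SPDK_GET_FIELD
 * convention), so fields beyond the caller's size fall back to the
 * controller's current keys; the key arguments here are hypothetical.
 */
static int
example_set_dhchap_keys(struct spdk_nvme_ctrlr *ctrlr, struct spdk_key *host_key,
			struct spdk_key *ctrlr_key)
{
	struct spdk_nvme_ctrlr_key_opts opts = {
		.size = sizeof(opts),
		.dhchap_key = host_key,
		/* May be NULL for one-way authentication; a non-NULL controller
		 * key without a host key is rejected with -EINVAL. */
		.dhchap_ctrlr_key = ctrlr_key,
	};

	return spdk_nvme_ctrlr_set_keys(ctrlr, &opts);
}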
    2037             : 
    2038             : static void
    2039          16 : nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2040             : {
    2041          16 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2042             : 
    2043          16 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2044           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_identify_controller failed!\n");
    2045           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2046           0 :                 return;
    2047             :         }
    2048             : 
    2049             :         /*
    2050             :          * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
    2051             :          *  controller supports.
    2052             :          */
    2053          16 :         ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
    2054          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
    2055          16 :         if (ctrlr->cdata.mdts > 0) {
    2056           0 :                 ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
    2057             :                                                 ctrlr->min_page_size * (1 << ctrlr->cdata.mdts));
    2058           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
    2059           0 :         }
    2060             : 
    2061          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
    2062          16 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    2063           1 :                 ctrlr->cntlid = ctrlr->cdata.cntlid;
    2064           1 :         } else {
    2065             :                 /*
    2066             :                  * Fabrics controllers should already have CNTLID from the Connect command.
    2067             :                  *
    2068             :                  * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
    2069             :                  * trust the one from Connect.
    2070             :                  */
    2071          15 :                 if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
    2072           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
    2073             :                                             ctrlr->cdata.cntlid, ctrlr->cntlid);
    2074           0 :                 }
    2075             :         }
    2076             : 
    2077          16 :         if (ctrlr->cdata.sgls.supported && !(ctrlr->quirks & NVME_QUIRK_NOT_USE_SGL)) {
    2078           0 :                 assert(ctrlr->cdata.sgls.supported != 0x3);
    2079           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
    2080           0 :                 if (ctrlr->cdata.sgls.supported == 0x2) {
    2081           0 :                         ctrlr->flags |= SPDK_NVME_CTRLR_SGL_REQUIRES_DWORD_ALIGNMENT;
    2082           0 :                 }
    2083             : 
    2084           0 :                 ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
    2085           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "transport max_sges %u\n", ctrlr->max_sges);
    2086           0 :         }
    2087             : 
    2088          16 :         if (ctrlr->cdata.sgls.metadata_address && !(ctrlr->quirks & NVME_QUIRK_NOT_USE_SGL)) {
    2089           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_MPTR_SGL_SUPPORTED;
    2090           0 :         }
    2091             : 
    2092          16 :         if (ctrlr->cdata.oacs.security && !(ctrlr->quirks & NVME_QUIRK_OACS_SECURITY)) {
    2093           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_SECURITY_SEND_RECV_SUPPORTED;
    2094           0 :         }
    2095             : 
    2096          16 :         if (ctrlr->cdata.oacs.directives) {
    2097           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_DIRECTIVES_SUPPORTED;
    2098           0 :         }
    2099             : 
    2100          16 :         NVME_CTRLR_DEBUGLOG(ctrlr, "fuses compare and write: %d\n",
    2101             :                             ctrlr->cdata.fuses.compare_and_write);
    2102          16 :         if (ctrlr->cdata.fuses.compare_and_write) {
    2103           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_COMPARE_AND_WRITE_SUPPORTED;
    2104           0 :         }
    2105             : 
    2106          32 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
    2107          16 :                              ctrlr->opts.admin_timeout_ms);
    2108          16 : }
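
/*
 * Worked example for the MDTS clamp above (values hypothetical): with a
 * minimum page size of 4096 bytes and MDTS = 5, the controller permits
 * transfers of up to 4096 * (1 << 5) = 131072 bytes (128 KiB), so
 * max_xfer_size is capped at that value. A zero MDTS means no limit beyond
 * what the transport reports.
 */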
    2109             : 
    2110             : static int
    2111          16 : nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
    2112             : {
    2113             :         int     rc;
    2114             : 
    2115          32 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
    2116          16 :                              ctrlr->opts.admin_timeout_ms);
    2117             : 
    2118          32 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0, 0,
    2119          16 :                                      &ctrlr->cdata, sizeof(ctrlr->cdata),
    2120          16 :                                      nvme_ctrlr_identify_done, ctrlr);
    2121          16 :         if (rc != 0) {
    2122           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2123           0 :                 return rc;
    2124             :         }
    2125             : 
    2126          16 :         return 0;
    2127          16 : }
    2128             : 
    2129             : static void
    2130           0 : nvme_ctrlr_get_zns_cmd_and_effects_log_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2131             : {
    2132             :         struct spdk_nvme_cmds_and_effect_log_page *log_page;
    2133           0 :         struct spdk_nvme_ctrlr *ctrlr = arg;
    2134             : 
    2135           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2136           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_get_zns_cmd_and_effects_log failed!\n");
    2137           0 :                 spdk_free(ctrlr->tmp_ptr);
    2138           0 :                 ctrlr->tmp_ptr = NULL;
    2139           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2140           0 :                 return;
    2141             :         }
    2142             : 
    2143           0 :         log_page = ctrlr->tmp_ptr;
    2144             : 
    2145           0 :         if (log_page->io_cmds_supported[SPDK_NVME_OPC_ZONE_APPEND].csupp) {
    2146           0 :                 ctrlr->flags |= SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED;
    2147           0 :         }
    2148           0 :         spdk_free(ctrlr->tmp_ptr);
    2149           0 :         ctrlr->tmp_ptr = NULL;
    2150             : 
    2151           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES, ctrlr->opts.admin_timeout_ms);
    2152           0 : }
    2153             : 
    2154             : static int
    2155           0 : nvme_ctrlr_get_zns_cmd_and_effects_log(struct spdk_nvme_ctrlr *ctrlr)
    2156             : {
    2157             :         int rc;
    2158             : 
    2159           0 :         assert(!ctrlr->tmp_ptr);
    2160           0 :         ctrlr->tmp_ptr = spdk_zmalloc(sizeof(struct spdk_nvme_cmds_and_effect_log_page), 64, NULL,
    2161             :                                       SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
    2162           0 :         if (!ctrlr->tmp_ptr) {
    2163           0 :                 rc = -ENOMEM;
    2164           0 :                 goto error;
    2165             :         }
    2166             : 
    2167           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG,
    2168           0 :                              ctrlr->opts.admin_timeout_ms);
    2169             : 
    2170           0 :         rc = spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, SPDK_NVME_LOG_COMMAND_EFFECTS_LOG,
    2171           0 :                         0, ctrlr->tmp_ptr, sizeof(struct spdk_nvme_cmds_and_effect_log_page),
    2172             :                         0, 0, 0, SPDK_NVME_CSI_ZNS << 24,
    2173           0 :                         nvme_ctrlr_get_zns_cmd_and_effects_log_done, ctrlr);
    2174           0 :         if (rc != 0) {
    2175           0 :                 goto error;
    2176             :         }
    2177             : 
    2178           0 :         return 0;
    2179             : 
    2180             : error:
    2181           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2182           0 :         spdk_free(ctrlr->tmp_ptr);
    2183           0 :         ctrlr->tmp_ptr = NULL;
    2184           0 :         return rc;
    2185           0 : }
    2186             : 
    2187             : static void
    2188           0 : nvme_ctrlr_identify_zns_specific_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2189             : {
    2190           0 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2191             : 
    2192           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2193             :                 /* no need to print an error, the controller simply does not support ZNS */
    2194           0 :                 nvme_ctrlr_free_zns_specific_data(ctrlr);
    2195           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
    2196           0 :                                      ctrlr->opts.admin_timeout_ms);
    2197           0 :                 return;
    2198             :         }
    2199             : 
    2200             :         /* A zero ZASL value means fall back to MDTS */
    2201           0 :         if (ctrlr->cdata_zns->zasl) {
    2202           0 :                 uint32_t max_append = ctrlr->min_page_size * (1 << ctrlr->cdata_zns->zasl);
    2203           0 :                 ctrlr->max_zone_append_size = spdk_min(ctrlr->max_xfer_size, max_append);
    2204           0 :         } else {
    2205           0 :                 ctrlr->max_zone_append_size = ctrlr->max_xfer_size;
    2206             :         }
    2207             : 
    2208           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG,
    2209           0 :                              ctrlr->opts.admin_timeout_ms);
    2210           0 : }
    2211             : 
    2212             : /**
    2213             :  * This function will try to fetch the I/O Command Specific Controller data structure for
    2214             :  * each I/O Command Set supported by SPDK.
    2215             :  *
    2216             :  * If an I/O Command Set is not supported by the controller, "Invalid Field in Command"
    2217             :  * will be returned. Since we probe for these exploratorily, getting an error back
    2218             :  * from the controller should not be treated as fatal.
    2219             :  *
    2220             :  * I/O Command Sets not supported by SPDK will be skipped (e.g. Key Value Command Set).
    2221             :  *
    2222             :  * I/O Command Sets without an IOCS-specific data structure (i.e. a zero-filled IOCS-specific
    2223             :  * data structure) will be skipped (e.g. NVM Command Set, Key Value Command Set).
    2224             :  */
    2225             : static int
    2226          19 : nvme_ctrlr_identify_iocs_specific(struct spdk_nvme_ctrlr *ctrlr)
    2227             : {
    2228             :         int     rc;
    2229             : 
    2230          19 :         if (!nvme_ctrlr_multi_iocs_enabled(ctrlr)) {
    2231          38 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
    2232          19 :                                      ctrlr->opts.admin_timeout_ms);
    2233          19 :                 return 0;
    2234             :         }
    2235             : 
    2236             :         /*
    2237             :          * Since SPDK currently only needs to fetch a single Command Set, keep the code here,
    2238             :          * instead of creating multiple NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC substates,
    2239             :          * which would require additional functions and complexity for no good reason.
    2240             :          */
    2241           0 :         assert(!ctrlr->cdata_zns);
    2242           0 :         ctrlr->cdata_zns = spdk_zmalloc(sizeof(*ctrlr->cdata_zns), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
    2243             :                                         SPDK_MALLOC_SHARE | SPDK_MALLOC_DMA);
    2244           0 :         if (!ctrlr->cdata_zns) {
    2245           0 :                 rc = -ENOMEM;
    2246           0 :                 goto error;
    2247             :         }
    2248             : 
    2249           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC,
    2250           0 :                              ctrlr->opts.admin_timeout_ms);
    2251             : 
    2252           0 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR_IOCS, 0, 0, SPDK_NVME_CSI_ZNS,
    2253           0 :                                      ctrlr->cdata_zns, sizeof(*ctrlr->cdata_zns),
    2254           0 :                                      nvme_ctrlr_identify_zns_specific_done, ctrlr);
    2255           0 :         if (rc != 0) {
    2256           0 :                 goto error;
    2257             :         }
    2258             : 
    2259           0 :         return 0;
    2260             : 
    2261             : error:
    2262           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2263           0 :         nvme_ctrlr_free_zns_specific_data(ctrlr);
    2264           0 :         return rc;
    2265          19 : }
    2266             : 
    2267             : enum nvme_active_ns_state {
    2268             :         NVME_ACTIVE_NS_STATE_IDLE,
    2269             :         NVME_ACTIVE_NS_STATE_PROCESSING,
    2270             :         NVME_ACTIVE_NS_STATE_DONE,
    2271             :         NVME_ACTIVE_NS_STATE_ERROR
    2272             : };
    2273             : 
    2274             : typedef void (*nvme_active_ns_ctx_deleter)(struct nvme_active_ns_ctx *);
    2275             : 
    2276             : struct nvme_active_ns_ctx {
    2277             :         struct spdk_nvme_ctrlr *ctrlr;
    2278             :         uint32_t page_count;
    2279             :         uint32_t next_nsid;
    2280             :         uint32_t *new_ns_list;
    2281             :         nvme_active_ns_ctx_deleter deleter;
    2282             : 
    2283             :         enum nvme_active_ns_state state;
    2284             : };
    2285             : 
    2286             : static struct nvme_active_ns_ctx *
    2287          45 : nvme_active_ns_ctx_create(struct spdk_nvme_ctrlr *ctrlr, nvme_active_ns_ctx_deleter deleter)
    2288             : {
    2289             :         struct nvme_active_ns_ctx *ctx;
    2290          45 :         uint32_t *new_ns_list = NULL;
    2291             : 
    2292          45 :         ctx = calloc(1, sizeof(*ctx));
    2293          45 :         if (!ctx) {
    2294           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate nvme_active_ns_ctx!\n");
    2295           0 :                 return NULL;
    2296             :         }
    2297             : 
    2298          45 :         new_ns_list = spdk_zmalloc(sizeof(struct spdk_nvme_ns_list), ctrlr->page_size,
    2299             :                                    NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_SHARE);
    2300          45 :         if (!new_ns_list) {
    2301           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate active_ns_list!\n");
    2302           0 :                 free(ctx);
    2303           0 :                 return NULL;
    2304             :         }
    2305             : 
    2306          45 :         ctx->page_count = 1;
    2307          45 :         ctx->new_ns_list = new_ns_list;
    2308          45 :         ctx->ctrlr = ctrlr;
    2309          45 :         ctx->deleter = deleter;
    2310             : 
    2311          45 :         return ctx;
    2312          45 : }
    2313             : 
    2314             : static void
    2315          45 : nvme_active_ns_ctx_destroy(struct nvme_active_ns_ctx *ctx)
    2316             : {
    2317          45 :         spdk_free(ctx->new_ns_list);
    2318          45 :         free(ctx);
    2319          45 : }
    2320             : 
    2321             : static int
    2322       18403 : nvme_ctrlr_destruct_namespace(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    2323             : {
    2324             :         struct spdk_nvme_ns tmp, *ns;
    2325             : 
    2326       18403 :         assert(ctrlr != NULL);
    2327             : 
    2328       18403 :         tmp.id = nsid;
    2329       18403 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    2330       18403 :         if (ns == NULL) {
    2331           0 :                 return -EINVAL;
    2332             :         }
    2333             : 
    2334       18403 :         nvme_ns_destruct(ns);
    2335       18403 :         ns->active = false;
    2336             : 
    2337       18403 :         return 0;
    2338       18403 : }
    2339             : 
    2340             : static int
    2341       12311 : nvme_ctrlr_construct_namespace(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    2342             : {
    2343             :         struct spdk_nvme_ns *ns;
    2344             : 
    2345       12311 :         if (nsid < 1 || nsid > ctrlr->cdata.nn) {
    2346           0 :                 return -EINVAL;
    2347             :         }
    2348             : 
    2349             :         /* Namespaces are constructed on demand, so simply looking one up constructs it. */
    2350       12311 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2351       12311 :         if (ns == NULL) {
    2352           0 :                 return -ENOMEM;
    2353             :         }
    2354             : 
    2355       12311 :         ns->active = true;
    2356             : 
    2357       12311 :         return 0;
    2358       12311 : }
    2359             : 
    2360             : static void
    2361          44 : nvme_ctrlr_identify_active_ns_swap(struct spdk_nvme_ctrlr *ctrlr, uint32_t *new_ns_list,
    2362             :                                    size_t max_entries)
    2363             : {
    2364          44 :         uint32_t active_ns_count = 0;
    2365             :         size_t i;
    2366             :         uint32_t nsid;
    2367             :         struct spdk_nvme_ns *ns, *tmp_ns;
    2368             :         int rc;
    2369             : 
    2370             :         /* First, remove namespaces that no longer exist */
    2371       15387 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    2372       15343 :                 nsid = new_ns_list[0];
    2373       15343 :                 active_ns_count = 0;
    2374     3547429 :                 while (nsid != 0) {
    2375     3536712 :                         if (nsid == ns->id) {
    2376        4626 :                                 break;
    2377             :                         }
    2378             : 
    2379     3532086 :                         nsid = new_ns_list[active_ns_count++];
    2380             :                 }
    2381             : 
    2382       15343 :                 if (nsid != ns->id) {
    2383             :                         /* Did not find this namespace id in the new list. */
    2384       10717 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Namespace %u was removed\n", ns->id);
    2385       10717 :                         nvme_ctrlr_destruct_namespace(ctrlr, ns->id);
    2386       10717 :                 }
    2387       15343 :         }
    2388             : 
    2389             :         /* Next, add new namespaces */
    2390          44 :         active_ns_count = 0;
    2391       12355 :         for (i = 0; i < max_entries; i++) {
    2392       12355 :                 nsid = new_ns_list[active_ns_count];
    2393             : 
    2394       12355 :                 if (nsid == 0) {
    2395          44 :                         break;
    2396             :                 }
    2397             : 
    2398             :                 /* If the namespace already exists, this will not construct it a second time. */
    2399       12311 :                 rc = nvme_ctrlr_construct_namespace(ctrlr, nsid);
    2400       12311 :                 if (rc != 0) {
    2401             :                         /* We can't easily handle a failure here, so just move on. */
    2402           0 :                         assert(false);
    2403             :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to allocate a namespace object.\n");
    2404             :                         continue;
    2405             :                 }
    2406             : 
    2407       12311 :                 active_ns_count++;
    2408       12311 :         }
    2409             : 
    2410          44 :         ctrlr->active_ns_count = active_ns_count;
    2411          44 : }
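
/*
 * Illustrative sketch: after the swap above, the public iterators walk
 * exactly the namespaces marked active. Loop shown for illustration only.
 */
static void
example_for_each_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;
	struct spdk_nvme_ns *ns;

	for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0;
	     nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
		ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
		assert(ns != NULL);
		/* ... use ns ... */
	}
}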
    2412             : 
    2413             : static void
    2414          30 : nvme_ctrlr_identify_active_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2415             : {
    2416          30 :         struct nvme_active_ns_ctx *ctx = arg;
    2417          30 :         uint32_t *new_ns_list = NULL;
    2418             : 
    2419          30 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2420           1 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2421           1 :                 goto out;
    2422             :         }
    2423             : 
    2424          29 :         ctx->next_nsid = ctx->new_ns_list[1024 * ctx->page_count - 1];
    2425          29 :         if (ctx->next_nsid == 0) {
    2426          24 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2427          24 :                 goto out;
    2428             :         }
    2429             : 
    2430           5 :         ctx->page_count++;
    2431          10 :         new_ns_list = spdk_realloc(ctx->new_ns_list,
    2432           5 :                                    ctx->page_count * sizeof(struct spdk_nvme_ns_list),
    2433           5 :                                    ctx->ctrlr->page_size);
    2434           5 :         if (!new_ns_list) {
    2435           0 :                 SPDK_ERRLOG("Failed to reallocate active_ns_list!\n");
    2436           0 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2437           0 :                 goto out;
    2438             :         }
    2439             : 
    2440           5 :         ctx->new_ns_list = new_ns_list;
    2441           5 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2442           5 :         return;
    2443             : 
    2444             : out:
    2445          25 :         if (ctx->deleter) {
    2446           9 :                 ctx->deleter(ctx);
    2447           9 :         }
    2448          30 : }
    2449             : 
    2450             : static void
    2451          50 : nvme_ctrlr_identify_active_ns_async(struct nvme_active_ns_ctx *ctx)
    2452             : {
    2453          50 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    2454             :         uint32_t i;
    2455             :         int rc;
    2456             : 
    2457          50 :         if (ctrlr->cdata.nn == 0) {
    2458          16 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2459          16 :                 goto out;
    2460             :         }
    2461             : 
    2462          34 :         assert(ctx->new_ns_list != NULL);
    2463             : 
    2464             :         /*
    2465             :          * If the controller doesn't support the active ns list (Identify CNS 0x02), dummy up
    2466             :          * an active ns list, i.e. report all namespaces as active
    2467             :          */
    2468          34 :         if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 1, 0) || ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS) {
    2469             :                 uint32_t *new_ns_list;
    2470             : 
    2471             :                 /*
    2472             :                  * The active NS list must always end with a zero element,
    2473             :                  * so we allocate space for cdata.nn + 1 entries.
    2474             :                  */
    2475           4 :                 ctx->page_count = spdk_divide_round_up(ctrlr->cdata.nn + 1,
    2476             :                                                        sizeof(struct spdk_nvme_ns_list) / sizeof(new_ns_list[0]));
    2477           8 :                 new_ns_list = spdk_realloc(ctx->new_ns_list,
    2478           4 :                                            ctx->page_count * sizeof(struct spdk_nvme_ns_list),
    2479           4 :                                            ctx->ctrlr->page_size);
    2480           4 :                 if (!new_ns_list) {
    2481           0 :                         SPDK_ERRLOG("Failed to reallocate active_ns_list!\n");
    2482           0 :                         ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2483           0 :                         goto out;
    2484             :                 }
    2485             : 
    2486           4 :                 ctx->new_ns_list = new_ns_list;
    2487           4 :                 ctx->new_ns_list[ctrlr->cdata.nn] = 0;
    2488        4091 :                 for (i = 0; i < ctrlr->cdata.nn; i++) {
    2489        4087 :                         ctx->new_ns_list[i] = i + 1;
    2490        4087 :                 }
    2491             : 
    2492           4 :                 ctx->state = NVME_ACTIVE_NS_STATE_DONE;
    2493           4 :                 goto out;
    2494             :         }
    2495             : 
    2496          30 :         ctx->state = NVME_ACTIVE_NS_STATE_PROCESSING;
    2497          60 :         rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, ctx->next_nsid, 0,
    2498          30 :                                      &ctx->new_ns_list[1024 * (ctx->page_count - 1)], sizeof(struct spdk_nvme_ns_list),
    2499          30 :                                      nvme_ctrlr_identify_active_ns_async_done, ctx);
    2500          30 :         if (rc != 0) {
    2501           0 :                 ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2502           0 :                 goto out;
    2503             :         }
    2504             : 
    2505          30 :         return;
    2506             : 
    2507             : out:
    2508          20 :         if (ctx->deleter) {
    2509          15 :                 ctx->deleter(ctx);
    2510          15 :         }
    2511          50 : }
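
/*
 * Sketch of the paging math used above: one struct spdk_nvme_ns_list holds
 * 1024 32-bit NSIDs (4 KiB). When a returned page comes back completely full,
 * its last NSID becomes the starting NSID for the next Identify request; a
 * zero entry terminates the list. Hypothetical helper, for clarity only.
 */
static inline uint32_t
example_active_ns_cursor(const uint32_t *ns_list, uint32_t page_count)
{
	/* Last NSID of the most recent page; 0 means the list is complete. */
	return ns_list[1024 * page_count - 1];
}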
    2512             : 
    2513             : static void
    2514          24 : _nvme_active_ns_ctx_deleter(struct nvme_active_ns_ctx *ctx)
    2515             : {
    2516          24 :         struct spdk_nvme_ctrlr *ctrlr = ctx->ctrlr;
    2517             :         struct spdk_nvme_ns *ns;
    2518             : 
    2519          24 :         if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
    2520           0 :                 nvme_active_ns_ctx_destroy(ctx);
    2521           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2522           0 :                 return;
    2523             :         }
    2524             : 
    2525          24 :         assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
    2526             : 
    2527          28 :         RB_FOREACH(ns, nvme_ns_tree, &ctrlr->ns) {
    2528           4 :                 nvme_ns_free_iocs_specific_data(ns);
    2529           4 :         }
    2530             : 
    2531          24 :         nvme_ctrlr_identify_active_ns_swap(ctrlr, ctx->new_ns_list, ctx->page_count * 1024);
    2532          24 :         nvme_active_ns_ctx_destroy(ctx);
    2533          24 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS, ctrlr->opts.admin_timeout_ms);
    2534          24 : }
    2535             : 
    2536             : static void
    2537          24 : _nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    2538             : {
    2539             :         struct nvme_active_ns_ctx *ctx;
    2540             : 
    2541          24 :         ctx = nvme_active_ns_ctx_create(ctrlr, _nvme_active_ns_ctx_deleter);
    2542          24 :         if (!ctx) {
    2543           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2544           0 :                 return;
    2545             :         }
    2546             : 
    2547          48 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS,
    2548          24 :                              ctrlr->opts.admin_timeout_ms);
    2549          24 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2550          24 : }
    2551             : 
    2552             : int
    2553          21 : nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    2554             : {
    2555             :         struct nvme_active_ns_ctx *ctx;
    2556             :         int rc;
    2557             : 
    2558          21 :         ctx = nvme_active_ns_ctx_create(ctrlr, NULL);
    2559          21 :         if (!ctx) {
    2560           0 :                 return -ENOMEM;
    2561             :         }
    2562             : 
    2563          21 :         nvme_ctrlr_identify_active_ns_async(ctx);
    2564          21 :         while (ctx->state == NVME_ACTIVE_NS_STATE_PROCESSING) {
    2565           0 :                 rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    2566           0 :                 if (rc < 0) {
    2567           0 :                         ctx->state = NVME_ACTIVE_NS_STATE_ERROR;
    2568           0 :                         break;
    2569             :                 }
    2570             :         }
    2571             : 
    2572          21 :         if (ctx->state == NVME_ACTIVE_NS_STATE_ERROR) {
    2573           1 :                 nvme_active_ns_ctx_destroy(ctx);
    2574           1 :                 return -ENXIO;
    2575             :         }
    2576             : 
    2577          20 :         assert(ctx->state == NVME_ACTIVE_NS_STATE_DONE);
    2578          20 :         nvme_ctrlr_identify_active_ns_swap(ctrlr, ctx->new_ns_list, ctx->page_count * 1024);
    2579          20 :         nvme_active_ns_ctx_destroy(ctx);
    2580             : 
    2581          20 :         return 0;
    2582          21 : }
    2583             : 
    2584             : static void
    2585          21 : nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2586             : {
    2587          21 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2588          21 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2589             :         uint32_t nsid;
    2590             :         int rc;
    2591             : 
    2592          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2593           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2594           0 :                 return;
    2595             :         }
    2596             : 
    2597          21 :         nvme_ns_set_identify_data(ns);
    2598             : 
    2599             :         /* move on to the next active NS */
    2600          21 :         nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2601          21 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2602          21 :         if (ns == NULL) {
    2603          12 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
    2604           6 :                                      ctrlr->opts.admin_timeout_ms);
    2605           6 :                 return;
    2606             :         }
    2607          15 :         ns->ctrlr = ctrlr;
    2608          15 :         ns->id = nsid;
    2609             : 
    2610          15 :         rc = nvme_ctrlr_identify_ns_async(ns);
    2611          15 :         if (rc) {
    2612           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2613           0 :         }
    2614          21 : }
    2615             : 
    2616             : static int
    2617          21 : nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
    2618             : {
    2619          21 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2620             :         struct spdk_nvme_ns_data *nsdata;
    2621             : 
    2622          21 :         nsdata = &ns->nsdata;
    2623             : 
    2624          42 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
    2625          21 :                              ctrlr->opts.admin_timeout_ms);
    2626          42 :         return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id, 0,
    2627          21 :                                        nsdata, sizeof(*nsdata),
    2628          21 :                                        nvme_ctrlr_identify_ns_async_done, ns);
    2629             : }
    2630             : 
    2631             : static int
    2632          14 : nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    2633             : {
    2634             :         uint32_t nsid;
    2635             :         struct spdk_nvme_ns *ns;
    2636             :         int rc;
    2637             : 
    2638          14 :         nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2639          14 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2640          14 :         if (ns == NULL) {
    2641             :                 /* No active NS, move on to the next state */
    2642          16 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
    2643           8 :                                      ctrlr->opts.admin_timeout_ms);
    2644           8 :                 return 0;
    2645             :         }
    2646             : 
    2647           6 :         ns->ctrlr = ctrlr;
    2648           6 :         ns->id = nsid;
    2649             : 
    2650           6 :         rc = nvme_ctrlr_identify_ns_async(ns);
    2651           6 :         if (rc) {
    2652           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2653           0 :         }
    2654             : 
    2655           6 :         return rc;
    2656          14 : }
    2657             : 
    2658             : static int
    2659           4 : nvme_ctrlr_identify_namespaces_iocs_specific_next(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
    2660             : {
    2661             :         uint32_t nsid;
    2662             :         struct spdk_nvme_ns *ns;
    2663             :         int rc;
    2664             : 
    2665           4 :         if (!prev_nsid) {
    2666           2 :                 nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2667           2 :         } else {
    2668             :                 /* move on to the next active NS */
    2669           2 :                 nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, prev_nsid);
    2670             :         }
    2671             : 
    2672           4 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2673           4 :         if (ns == NULL) {
    2674             :                 /* No first/next active NS, move on to the next state */
    2675           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2676           1 :                                      ctrlr->opts.admin_timeout_ms);
    2677           1 :                 return 0;
    2678             :         }
    2679             : 
    2680             :         /* loop until we find an ns that has (supported) IOCS-specific data */
    2681          10 :         while (!nvme_ns_has_supported_iocs_specific_data(ns)) {
    2682           8 :                 nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2683           8 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2684           8 :                 if (ns == NULL) {
    2685             :                         /* no namespace with (supported) iocs specific data found */
    2686           2 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2687           1 :                                              ctrlr->opts.admin_timeout_ms);
    2688           1 :                         return 0;
    2689             :                 }
    2690             :         }
    2691             : 
    2692           2 :         rc = nvme_ctrlr_identify_ns_iocs_specific_async(ns);
    2693           2 :         if (rc) {
    2694           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2695           1 :         }
    2696             : 
    2697           2 :         return rc;
    2698           4 : }
    2699             : 
    2700             : static void
    2701           0 : nvme_ctrlr_identify_ns_zns_specific_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2702             : {
    2703           0 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2704           0 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2705             : 
    2706           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2707           0 :                 nvme_ns_free_zns_specific_data(ns);
    2708           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2709           0 :                 return;
    2710             :         }
    2711             : 
    2712           0 :         nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, ns->id);
    2713           0 : }
    2714             : 
    2715             : static int
    2716           2 : nvme_ctrlr_identify_ns_zns_specific_async(struct spdk_nvme_ns *ns)
    2717             : {
    2718           2 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2719             :         int rc;
    2720             : 
    2721           2 :         assert(!ns->nsdata_zns);
    2722           2 :         ns->nsdata_zns = spdk_zmalloc(sizeof(*ns->nsdata_zns), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
    2723             :                                       SPDK_MALLOC_SHARE);
    2724           2 :         if (!ns->nsdata_zns) {
    2725           0 :                 return -ENOMEM;
    2726             :         }
    2727             : 
    2728           4 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
    2729           2 :                              ctrlr->opts.admin_timeout_ms);
    2730           4 :         rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
    2731           2 :                                      ns->nsdata_zns, sizeof(*ns->nsdata_zns),
    2732           2 :                                      nvme_ctrlr_identify_ns_zns_specific_async_done, ns);
    2733           2 :         if (rc) {
    2734           1 :                 nvme_ns_free_zns_specific_data(ns);
    2735           1 :         }
    2736             : 
    2737           2 :         return rc;
    2738           2 : }
    2739             : 
    2740             : static void
    2741           0 : nvme_ctrlr_identify_ns_nvm_specific_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2742             : {
    2743           0 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2744           0 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2745             : 
    2746           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2747           0 :                 nvme_ns_free_nvm_specific_data(ns);
    2748           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2749           0 :                 return;
    2750             :         }
    2751             : 
    2752           0 :         nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, ns->id);
    2753           0 : }
    2754             : 
    2755             : static int
    2756           0 : nvme_ctrlr_identify_ns_nvm_specific_async(struct spdk_nvme_ns *ns)
    2757             : {
    2758           0 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2759             :         int rc;
    2760             : 
    2761           0 :         assert(!ns->nsdata_nvm);
    2762           0 :         ns->nsdata_nvm = spdk_zmalloc(sizeof(*ns->nsdata_nvm), 64, NULL, SPDK_ENV_NUMA_ID_ANY,
    2763             :                                       SPDK_MALLOC_SHARE);
    2764           0 :         if (!ns->nsdata_nvm) {
    2765           0 :                 return -ENOMEM;
    2766             :         }
    2767             : 
    2768           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC,
    2769           0 :                              ctrlr->opts.admin_timeout_ms);
    2770           0 :         rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_IOCS, 0, ns->id, ns->csi,
    2771           0 :                                      ns->nsdata_nvm, sizeof(*ns->nsdata_nvm),
    2772           0 :                                      nvme_ctrlr_identify_ns_nvm_specific_async_done, ns);
    2773           0 :         if (rc) {
    2774           0 :                 nvme_ns_free_nvm_specific_data(ns);
    2775           0 :         }
    2776             : 
    2777           0 :         return rc;
    2778           0 : }
    2779             : 
    2780             : static int
    2781           2 : nvme_ctrlr_identify_ns_iocs_specific_async(struct spdk_nvme_ns *ns)
    2782             : {
    2783           2 :         switch (ns->csi) {
    2784             :         case SPDK_NVME_CSI_ZNS:
    2785           2 :                 return nvme_ctrlr_identify_ns_zns_specific_async(ns);
    2786             :         case SPDK_NVME_CSI_NVM:
    2787           0 :                 if (ns->ctrlr->cdata.ctratt.bits.elbas) {
    2788           0 :                         return nvme_ctrlr_identify_ns_nvm_specific_async(ns);
    2789             :                 }
    2790             :         /* fallthrough */
    2791             :         default:
    2792             :                 /*
    2793             :                  * This switch must handle all cases for which
    2794             :                  * nvme_ns_has_supported_iocs_specific_data() returns true,
    2795             :                  * other cases should never happen.
    2796             :                  */
    2797           0 :                 assert(0);
    2798             :         }
    2799             : 
    2800             :         return -EINVAL;
    2801           2 : }
    2802             : 
    2803             : static int
    2804          14 : nvme_ctrlr_identify_namespaces_iocs_specific(struct spdk_nvme_ctrlr *ctrlr)
    2805             : {
    2806          14 :         if (!nvme_ctrlr_multi_iocs_enabled(ctrlr)) {
    2807             :                 /* Multi IOCS not supported/enabled, move on to the next state */
    2808          28 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
    2809          14 :                                      ctrlr->opts.admin_timeout_ms);
    2810          14 :                 return 0;
    2811             :         }
    2812             : 
    2813           0 :         return nvme_ctrlr_identify_namespaces_iocs_specific_next(ctrlr, 0);
    2814          14 : }
    2815             : 
    2816             : static void
    2817           6 : nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2818             : {
    2819           6 :         struct spdk_nvme_ns *ns = (struct spdk_nvme_ns *)arg;
    2820           6 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2821             :         uint32_t nsid;
    2822             :         int rc;
    2823             : 
    2824           6 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2825             :                 /*
    2826             :                  * Many controllers claim to be compatible with NVMe 1.3; however,
    2827             :                  * they do not implement the NS ID Descriptor List. Therefore, instead of setting
    2828             :                  * the state to NVME_CTRLR_STATE_ERROR, silently ignore the completion
    2829             :                  * error and move on to the next state.
    2830             :                  *
    2831             :                  * The proper way is to create a new quirk for controllers that violate
    2832             :                  * the NVMe 1.3 spec by not supporting NS ID Desc List.
    2833             :                  * (Re-using the NVME_QUIRK_IDENTIFY_CNS quirk is not possible, since
    2834             :                  * it is too generic and was added in order to handle controllers that
    2835             :                  * violate the NVMe 1.1 spec by not supporting ACTIVE LIST).
    2836             :                  */
    2837           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2838           0 :                                      ctrlr->opts.admin_timeout_ms);
    2839           0 :                 return;
    2840             :         }
    2841             : 
    2842           6 :         nvme_ns_set_id_desc_list_data(ns);
    2843             : 
    2844             :         /* move on to the next active NS */
    2845           6 :         nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
    2846           6 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2847           6 :         if (ns == NULL) {
    2848           4 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2849           2 :                                      ctrlr->opts.admin_timeout_ms);
    2850           2 :                 return;
    2851             :         }
    2852             : 
    2853           4 :         rc = nvme_ctrlr_identify_id_desc_async(ns);
    2854           4 :         if (rc) {
    2855           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2856           0 :         }
    2857           6 : }
    2858             : 
    2859             : static int
    2860           6 : nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
    2861             : {
    2862           6 :         struct spdk_nvme_ctrlr *ctrlr = ns->ctrlr;
    2863             : 
    2864           6 :         memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
    2865             : 
    2866          12 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
    2867           6 :                              ctrlr->opts.admin_timeout_ms);
    2868          12 :         return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
    2869           6 :                                        0, ns->id, 0, ns->id_desc_list, sizeof(ns->id_desc_list),
    2870           6 :                                        nvme_ctrlr_identify_id_desc_async_done, ns);
    2871             : }
    2872             : 
    2873             : static int
    2874          14 : nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    2875             : {
    2876             :         uint32_t nsid;
    2877             :         struct spdk_nvme_ns *ns;
    2878             :         int rc;
    2879             : 
    2880          26 :         if ((ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) &&
    2881          12 :              !(ctrlr->cap.bits.css & SPDK_NVME_CAP_CSS_IOCS)) ||
    2882          14 :             (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
    2883          12 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
    2884             :                 /* NS ID Desc List not supported, move on to the next state */
    2885          24 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2886          12 :                                      ctrlr->opts.admin_timeout_ms);
    2887          12 :                 return 0;
    2888             :         }
    2889             : 
    2890           2 :         nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    2891           2 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    2892           2 :         if (ns == NULL) {
    2893             :                 /* No active NS, move on to the next state */
    2894           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC,
    2895           0 :                                      ctrlr->opts.admin_timeout_ms);
    2896           0 :                 return 0;
    2897             :         }
    2898             : 
    2899           2 :         rc = nvme_ctrlr_identify_id_desc_async(ns);
    2900           2 :         if (rc) {
    2901           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2902           0 :         }
    2903             : 
    2904           2 :         return rc;
    2905          14 : }
    2906             : 
    2907             : static void
    2908          19 : nvme_ctrlr_update_nvmf_ioccsz(struct spdk_nvme_ctrlr *ctrlr)
    2909             : {
    2910          19 :         if (spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
    2911           4 :                 if (ctrlr->cdata.nvmf_specific.ioccsz < 4) {
    2912           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Incorrect IOCCSZ %u, the minimum value should be 4\n",
    2913             :                                           ctrlr->cdata.nvmf_specific.ioccsz);
    2914           0 :                         ctrlr->cdata.nvmf_specific.ioccsz = 4;
    2915           0 :                         assert(0);
    2916             :                 }
    2917           4 :                 ctrlr->ioccsz_bytes = ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd);
    2918           4 :                 ctrlr->icdoff = ctrlr->cdata.nvmf_specific.icdoff;
    2919           4 :         }
    2920          19 : }
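
To make the IOCCSZ conversion concrete: the identify field counts capsule size in 16-byte units, and the capsule holds the 64-byte submission queue entry plus any in-capsule data, which is why 4 is the minimum legal value. A worked sketch with a hypothetical identify value:

        #include <stdint.h>
        #include <stdio.h>

        int
        main(void)
        {
                const uint32_t sqe_size = 64;   /* sizeof(struct spdk_nvme_cmd) */
                uint32_t ioccsz = 8;            /* hypothetical identify-data value */
                uint32_t ioccsz_bytes = ioccsz * 16 - sqe_size;

                /* Prints: capsule 128 B, in-capsule data 64 B */
                printf("capsule %u B, in-capsule data %u B\n", ioccsz * 16, ioccsz_bytes);
                return 0;
        }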
    2921             : 
    2922             : static void
    2923          19 : nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2924             : {
    2925             :         uint32_t cq_allocated, sq_allocated, min_allocated, i;
    2926          19 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2927             : 
    2928          19 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2929           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set Features - Number of Queues failed!\n");
    2930           0 :                 ctrlr->opts.num_io_queues = 0;
    2931           0 :         } else {
    2932             :                 /*
    2933             :                  * Data in cdw0 is 0-based.
    2934             :                  * Lower 16-bits indicate number of submission queues allocated.
    2935             :                  * Upper 16-bits indicate number of completion queues allocated.
    2936             :                  */
    2937          19 :                 sq_allocated = (cpl->cdw0 & 0xFFFF) + 1;
    2938          19 :                 cq_allocated = (cpl->cdw0 >> 16) + 1;
    2939             : 
    2940             :                 /*
    2941             :                  * For 1:1 queue mapping, set number of allocated queues to be minimum of
    2942             :                  * submission and completion queues.
    2943             :                  */
    2944          19 :                 min_allocated = spdk_min(sq_allocated, cq_allocated);
    2945             : 
    2946             :                 /* Set number of queues to be minimum of requested and actually allocated. */
    2947          19 :                 ctrlr->opts.num_io_queues = spdk_min(min_allocated, ctrlr->opts.num_io_queues);
    2948             :         }
    2949             : 
    2950          19 :         ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
    2951          19 :         if (ctrlr->free_io_qids == NULL) {
    2952           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2953           0 :                 return;
    2954             :         }
    2955             : 
    2956             :         /* Initialize list of free I/O queue IDs. QID 0 is the admin queue (implicitly allocated). */
    2957          69 :         for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
    2958          50 :                 spdk_nvme_ctrlr_free_qid(ctrlr, i);
    2959          50 :         }
    2960             : 
    2961          38 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
    2962          19 :                              ctrlr->opts.admin_timeout_ms);
    2963          19 : }
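
The cdw0 unpacking above can be checked in isolation. A minimal sketch with a hypothetical completion value, following the 0-based SQ/CQ encoding described in the comment:

        #include <stdint.h>
        #include <stdio.h>

        int
        main(void)
        {
                uint32_t cdw0 = 0x00070003;     /* hypothetical: controller granted 4 SQs, 8 CQs */
                uint32_t sq_allocated = (cdw0 & 0xFFFF) + 1;
                uint32_t cq_allocated = (cdw0 >> 16) + 1;

                /* With 1:1 SQ/CQ mapping, only the smaller count is usable. */
                uint32_t usable = sq_allocated < cq_allocated ? sq_allocated : cq_allocated;

                printf("usable I/O queue pairs: %u\n", usable);         /* -> 4 */
                return 0;
        }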
    2964             : 
    2965             : static int
    2966          19 : nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
    2967             : {
    2968             :         int rc;
    2969             : 
    2970          19 :         if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
    2971           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "Limiting requested num_io_queues %u to max %d\n",
    2972             :                                      ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
    2973           0 :                 ctrlr->opts.num_io_queues = SPDK_NVME_MAX_IO_QUEUES;
    2974          19 :         } else if (ctrlr->opts.num_io_queues < 1) {
    2975          13 :                 NVME_CTRLR_NOTICELOG(ctrlr, "Requested num_io_queues 0, increasing to 1\n");
    2976          13 :                 ctrlr->opts.num_io_queues = 1;
    2977          13 :         }
    2978             : 
    2979          38 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
    2980          19 :                              ctrlr->opts.admin_timeout_ms);
    2981             : 
    2982          38 :         rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
    2983          19 :                                            nvme_ctrlr_set_num_queues_done, ctrlr);
    2984          19 :         if (rc != 0) {
    2985           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    2986           0 :                 return rc;
    2987             :         }
    2988             : 
    2989          19 :         return 0;
    2990          19 : }
    2991             : 
    2992             : static void
    2993           3 : nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
    2994             : {
    2995             :         uint32_t keep_alive_interval_us;
    2996           3 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    2997             : 
    2998           3 :         if (spdk_nvme_cpl_is_error(cpl)) {
    2999           2 :                 if ((cpl->status.sct == SPDK_NVME_SCT_GENERIC) &&
    3000           2 :                     (cpl->status.sc == SPDK_NVME_SC_INVALID_FIELD)) {
    3001           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Keep alive timeout Get Feature is not supported\n");
    3002           1 :                 } else {
    3003           1 :                         NVME_CTRLR_ERRLOG(ctrlr, "Keep alive timeout Get Feature failed: SC %x SCT %x\n",
    3004             :                                           cpl->status.sc, cpl->status.sct);
    3005           1 :                         ctrlr->opts.keep_alive_timeout_ms = 0;
    3006           1 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3007           1 :                         return;
    3008             :                 }
    3009           1 :         } else {
    3010           1 :                 if (ctrlr->opts.keep_alive_timeout_ms != cpl->cdw0) {
    3011           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Controller adjusted keep alive timeout to %u ms\n",
    3012             :                                             cpl->cdw0);
    3013           1 :                 }
    3014             : 
    3015           1 :                 ctrlr->opts.keep_alive_timeout_ms = cpl->cdw0;
    3016             :         }
    3017             : 
    3018           2 :         if (ctrlr->opts.keep_alive_timeout_ms == 0) {
    3019           0 :                 ctrlr->keep_alive_interval_ticks = 0;
    3020           0 :         } else {
    3021           2 :                 keep_alive_interval_us = ctrlr->opts.keep_alive_timeout_ms * 1000 / 2;
    3022             : 
    3023           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Sending keep alive every %u us\n", keep_alive_interval_us);
    3024             : 
    3025           2 :                 ctrlr->keep_alive_interval_ticks = (keep_alive_interval_us * spdk_get_ticks_hz()) /
    3026             :                                                    UINT64_C(1000000);
    3027             : 
    3028             :                 /* Schedule the first Keep Alive to be sent as soon as possible. */
    3029           2 :                 ctrlr->next_keep_alive_tick = spdk_get_ticks();
    3030             :         }
    3031             : 
    3032           2 :         if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    3033           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    3034           0 :         } else {
    3035           4 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    3036           2 :                                      ctrlr->opts.admin_timeout_ms);
    3037             :         }
    3038           3 : }
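
The interval arithmetic above sends keep alives at half the negotiated timeout, so a single lost command still leaves time for another before the controller declares the host dead. A worked sketch with hypothetical values:

        #include <stdint.h>
        #include <stdio.h>

        int
        main(void)
        {
                uint32_t keep_alive_timeout_ms = 10000; /* hypothetical value from cdw0 */
                uint64_t ticks_hz = 1000000000;         /* hypothetical 1 GHz tick rate */

                uint32_t interval_us = keep_alive_timeout_ms * 1000 / 2;
                uint64_t interval_ticks = (interval_us * ticks_hz) / UINT64_C(1000000);

                /* Prints: keep alive every 5000000 us (5000000000 ticks) */
                printf("keep alive every %u us (%llu ticks)\n",
                       interval_us, (unsigned long long)interval_ticks);
                return 0;
        }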
    3039             : 
    3040             : static int
    3041          22 : nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
    3042             : {
    3043             :         int rc;
    3044             : 
    3045          22 :         if (ctrlr->opts.keep_alive_timeout_ms == 0) {
    3046          19 :                 if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    3047           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    3048           0 :                 } else {
    3049          38 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    3050          19 :                                              ctrlr->opts.admin_timeout_ms);
    3051             :                 }
    3052          19 :                 return 0;
    3053             :         }
    3054             : 
    3055             :         /* Note: Discovery controller identify data does not populate KAS according to spec. */
    3056           3 :         if (!spdk_nvme_ctrlr_is_discovery(ctrlr) && ctrlr->cdata.kas == 0) {
    3057           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Controller KAS is 0 - not enabling Keep Alive\n");
    3058           0 :                 ctrlr->opts.keep_alive_timeout_ms = 0;
    3059           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC,
    3060           0 :                                      ctrlr->opts.admin_timeout_ms);
    3061           0 :                 return 0;
    3062             :         }
    3063             : 
    3064           6 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
    3065           3 :                              ctrlr->opts.admin_timeout_ms);
    3066             : 
    3067             :         /* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
    3068           6 :         rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
    3069           3 :                                              nvme_ctrlr_set_keep_alive_timeout_done, ctrlr);
    3070           3 :         if (rc != 0) {
    3071           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Keep alive timeout Get Feature failed: %d\n", rc);
    3072           0 :                 ctrlr->opts.keep_alive_timeout_ms = 0;
    3073           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3074           0 :                 return rc;
    3075             :         }
    3076             : 
    3077           3 :         return 0;
    3078          22 : }
    3079             : 
    3080             : static void
    3081           0 : nvme_ctrlr_set_host_id_done(void *arg, const struct spdk_nvme_cpl *cpl)
    3082             : {
    3083           0 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    3084             : 
    3085           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3086             :                 /*
    3087             :                  * Treat Set Features - Host ID failure as non-fatal, since the Host ID feature
    3088             :                  * is optional.
    3089             :                  */
    3090           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
    3091             :                                    cpl->status.sc, cpl->status.sct);
    3092           0 :         } else {
    3093           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Set Features - Host ID was successful\n");
    3094             :         }
    3095             : 
    3096           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    3097           0 : }
    3098             : 
    3099             : static int
    3100          14 : nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
    3101             : {
    3102             :         uint8_t *host_id;
    3103             :         uint32_t host_id_size;
    3104             :         int rc;
    3105             : 
    3106          14 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    3107             :                 /*
    3108             :                  * NVMe-oF sends the host ID during Connect and doesn't allow
    3109             :                  * Set Features - Host Identifier after Connect, so we don't need to do anything here.
    3110             :                  */
    3111          14 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "NVMe-oF transport - not sending Set Features - Host ID\n");
    3112          14 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    3113          14 :                 return 0;
    3114             :         }
    3115             : 
    3116           0 :         if (ctrlr->cdata.ctratt.bits.host_id_exhid_supported) {
    3117           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Using 128-bit extended host identifier\n");
    3118           0 :                 host_id = ctrlr->opts.extended_host_id;
    3119           0 :                 host_id_size = sizeof(ctrlr->opts.extended_host_id);
    3120           0 :         } else {
    3121           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Using 64-bit host identifier\n");
    3122           0 :                 host_id = ctrlr->opts.host_id;
    3123           0 :                 host_id_size = sizeof(ctrlr->opts.host_id);
    3124             :         }
    3125             : 
    3126             :         /* If the user specified an all-zeroes host identifier, don't send the command. */
    3127           0 :         if (spdk_mem_all_zero(host_id, host_id_size)) {
    3128           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "User did not specify host ID - not sending Set Features - Host ID\n");
    3129           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_TRANSPORT_READY, ctrlr->opts.admin_timeout_ms);
    3130           0 :                 return 0;
    3131             :         }
    3132             : 
    3133           0 :         SPDK_LOGDUMP(nvme, "host_id", host_id, host_id_size);
    3134             : 
    3135           0 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
    3136           0 :                              ctrlr->opts.admin_timeout_ms);
    3137             : 
    3138           0 :         rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
    3139           0 :         if (rc != 0) {
    3140           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Set Features - Host ID failed: %d\n", rc);
    3141           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3142           0 :                 return rc;
    3143             :         }
    3144             : 
    3145           0 :         return 0;
    3146          14 : }
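
The spdk_mem_all_zero() gate above treats an all-zeroes buffer as "no host ID configured". A minimal equivalent of that check, with a hypothetical 128-bit extended host ID buffer:

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        static bool
        all_zero(const uint8_t *buf, size_t len)
        {
                for (size_t i = 0; i < len; i++) {
                        if (buf[i] != 0) {
                                return false;
                        }
                }
                return true;
        }

        int
        main(void)
        {
                uint8_t host_id[16] = {0};      /* hypothetical unset 128-bit host ID */

                printf("send Set Features - Host ID: %s\n",
                       all_zero(host_id, sizeof(host_id)) ? "no" : "yes");
                return 0;
        }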
    3147             : 
    3148             : void
    3149           4 : nvme_ctrlr_update_namespaces(struct spdk_nvme_ctrlr *ctrlr)
    3150             : {
    3151             :         uint32_t nsid;
    3152             :         struct spdk_nvme_ns *ns;
    3153             : 
    3154          19 :         for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
    3155          19 :              nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
    3156          15 :                 ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    3157          15 :                 nvme_ns_construct(ns, nsid, ctrlr);
    3158          15 :         }
    3159           4 : }
    3160             : 
    3161             : static int
    3162           4 : nvme_ctrlr_clear_changed_ns_log(struct spdk_nvme_ctrlr *ctrlr)
    3163             : {
    3164             :         struct nvme_completion_poll_status      *status;
    3165           4 :         int             rc = -ENOMEM;
    3166           4 :         char            *buffer = NULL;
    3167             :         uint32_t        nsid;
    3168           4 :         size_t          buf_size = (SPDK_NVME_MAX_CHANGED_NAMESPACES * sizeof(uint32_t));
    3169             : 
    3170           4 :         if (ctrlr->opts.disable_read_changed_ns_list_log_page) {
    3171           0 :                 return 0;
    3172             :         }
    3173             : 
    3174           4 :         buffer = spdk_dma_zmalloc(buf_size, 4096, NULL);
    3175           4 :         if (!buffer) {
    3176           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate buffer for getting "
    3177             :                                   "changed ns log.\n");
    3178           0 :                 return rc;
    3179             :         }
    3180             : 
    3181           4 :         status = calloc(1, sizeof(*status));
    3182           4 :         if (!status) {
    3183           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    3184           0 :                 goto free_buffer;
    3185             :         }
    3186             : 
    3187           8 :         rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr,
    3188             :                                               SPDK_NVME_LOG_CHANGED_NS_LIST,
    3189             :                                               SPDK_NVME_GLOBAL_NS_TAG,
    3190           4 :                                               buffer, buf_size, 0,
    3191           4 :                                               nvme_completion_poll_cb, status);
    3192             : 
    3193           4 :         if (rc) {
    3194           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_get_log_page() failed: rc=%d\n", rc);
    3195           0 :                 free(status);
    3196           0 :                 goto free_buffer;
    3197             :         }
    3198             : 
    3199           8 :         rc = nvme_wait_for_completion_timeout(ctrlr->adminq, status,
    3200           4 :                                               ctrlr->opts.admin_timeout_ms * 1000);
    3201           4 :         if (!status->timed_out) {
    3202           4 :                 free(status);
    3203           4 :         }
    3204             : 
    3205           4 :         if (rc) {
    3206           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "wait for spdk_nvme_ctrlr_cmd_get_log_page failed: rc=%d\n", rc);
    3207           0 :                 goto free_buffer;
    3208             :         }
    3209             : 
     3210             :         /* Only check for the overflow case. */
    3211           4 :         nsid = from_le32(buffer);
    3212           4 :         if (nsid == 0xffffffffu) {
    3213           0 :                 NVME_CTRLR_WARNLOG(ctrlr, "changed ns log overflowed.\n");
    3214           0 :         }
    3215             : 
    3216             : free_buffer:
    3217           4 :         spdk_dma_free(buffer);
    3218           4 :         return rc;
    3219           4 : }
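
The overflow probe above reads only the first entry of the Changed Namespace List log page, an array of little-endian 32-bit NSIDs where a leading 0xFFFFFFFF means more namespaces changed than the list can hold. A standalone sketch of the same decode (read_le32 is a local stand-in for from_le32):

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        static uint32_t
        read_le32(const void *p)
        {
                uint8_t b[4];

                memcpy(b, p, 4);
                return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
                       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
        }

        int
        main(void)
        {
                uint8_t log[8] = {0xff, 0xff, 0xff, 0xff};      /* hypothetical overflowed page */

                if (read_le32(log) == 0xffffffffu) {
                        printf("changed ns log overflowed\n");
                }
                return 0;
        }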
    3220             : 
    3221             : static void
    3222           5 : nvme_ctrlr_process_async_event(struct spdk_nvme_ctrlr *ctrlr,
    3223             :                                const struct spdk_nvme_cpl *cpl)
    3224             : {
    3225             :         union spdk_nvme_async_event_completion event;
    3226             :         struct spdk_nvme_ctrlr_process *active_proc;
    3227             :         int rc;
    3228             : 
    3229           5 :         event.raw = cpl->cdw0;
    3230             : 
    3231           5 :         if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
    3232           5 :             (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) {
    3233           4 :                 nvme_ctrlr_clear_changed_ns_log(ctrlr);
    3234             : 
    3235           4 :                 rc = nvme_ctrlr_identify_active_ns(ctrlr);
    3236           4 :                 if (rc) {
    3237           0 :                         return;
    3238             :                 }
    3239           4 :                 nvme_ctrlr_update_namespaces(ctrlr);
    3240           4 :                 nvme_io_msg_ctrlr_update(ctrlr);
    3241           4 :         }
    3242             : 
    3243           5 :         if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) &&
    3244           5 :             (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_ANA_CHANGE)) {
    3245           1 :                 if (!ctrlr->opts.disable_read_ana_log_page) {
    3246           1 :                         rc = nvme_ctrlr_update_ana_log_page(ctrlr);
    3247           1 :                         if (rc) {
    3248           0 :                                 return;
    3249             :                         }
    3250           2 :                         nvme_ctrlr_parse_ana_log_page(ctrlr, nvme_ctrlr_update_ns_ana_states,
    3251           1 :                                                       ctrlr);
    3252           1 :                 }
    3253           1 :         }
    3254             : 
    3255           5 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3256           5 :         if (active_proc && active_proc->aer_cb_fn) {
    3257           3 :                 active_proc->aer_cb_fn(active_proc->aer_cb_arg, cpl);
    3258           3 :         }
    3259           5 : }
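
The dispatch above keys off the layout of AER completion dword 0, which the spdk_nvme_async_event_completion union mirrors: event type in bits 2:0, event information in bits 15:8, and the associated log page ID in bits 23:16 (per the NVMe base specification). A sketch decoding a hypothetical "namespace attribute changed" notice:

        #include <stdint.h>
        #include <stdio.h>

        int
        main(void)
        {
                /* Hypothetical cdw0: type 2 (notice), info 0 (NS attr changed),
                 * log page 0x04 (Changed Namespace List). */
                uint32_t raw = 0x00040002;

                uint32_t type = raw & 0x7;
                uint32_t info = (raw >> 8) & 0xff;
                uint32_t log_page = (raw >> 16) & 0xff;

                printf("type=%u info=%u log_page=0x%02x\n", type, info, log_page);
                return 0;
        }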
    3260             : 
    3261             : static void
    3262           5 : nvme_ctrlr_queue_async_event(struct spdk_nvme_ctrlr *ctrlr,
    3263             :                              const struct spdk_nvme_cpl *cpl)
    3264             : {
    3265             :         struct  spdk_nvme_ctrlr_aer_completion *nvme_event;
    3266             :         struct spdk_nvme_ctrlr_process *proc;
    3267             : 
     3268             :         /* Add the async event to each process object's event list */
    3269          10 :         TAILQ_FOREACH(proc, &ctrlr->active_procs, tailq) {
     3270             :                 /* Must be shared memory so other processes can access it */
    3271           5 :                 nvme_event = spdk_zmalloc(sizeof(*nvme_event), 0, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
    3272           5 :                 if (!nvme_event) {
    3273           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Alloc nvme event failed, ignore the event\n");
    3274           0 :                         return;
    3275             :                 }
    3276           5 :                 nvme_event->cpl = *cpl;
    3277             : 
    3278           5 :                 STAILQ_INSERT_TAIL(&proc->async_events, nvme_event, link);
    3279           5 :         }
    3280           5 : }
    3281             : 
    3282             : static void
    3283           5 : nvme_ctrlr_complete_queued_async_events(struct spdk_nvme_ctrlr *ctrlr)
    3284             : {
    3285             :         struct  spdk_nvme_ctrlr_aer_completion  *nvme_event, *nvme_event_tmp;
    3286             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3287             : 
    3288           5 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3289             : 
    3290          10 :         STAILQ_FOREACH_SAFE(nvme_event, &active_proc->async_events, link, nvme_event_tmp) {
    3291           5 :                 STAILQ_REMOVE(&active_proc->async_events, nvme_event,
    3292             :                               spdk_nvme_ctrlr_aer_completion, link);
    3293           5 :                 nvme_ctrlr_process_async_event(ctrlr, &nvme_event->cpl);
    3294           5 :                 spdk_free(nvme_event);
    3295             : 
    3296           5 :         }
    3297           5 : }
    3298             : 
    3299             : static void
    3300           5 : nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
    3301             : {
    3302           5 :         struct nvme_async_event_request *aer = arg;
    3303           5 :         struct spdk_nvme_ctrlr          *ctrlr = aer->ctrlr;
    3304             : 
    3305           5 :         if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
    3306           5 :             cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
    3307             :                 /*
     3308             :                  *  This is simulated when the controller is being shut down, to
    3309             :                  *  effectively abort outstanding asynchronous event requests
    3310             :                  *  and make sure all memory is freed.  Do not repost the
    3311             :                  *  request in this case.
    3312             :                  */
    3313           0 :                 return;
    3314             :         }
    3315             : 
    3316           5 :         if (cpl->status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
    3317           0 :             cpl->status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED) {
    3318             :                 /*
    3319             :                  *  SPDK will only send as many AERs as the device says it supports,
    3320             :                  *  so this status code indicates an out-of-spec device.  Do not repost
    3321             :                  *  the request in this case.
    3322             :                  */
    3323           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Controller appears out-of-spec for asynchronous event request\n"
    3324             :                                   "handling.  Do not repost this AER.\n");
    3325           0 :                 return;
    3326             :         }
    3327             : 
     3328             :         /* Queue the event on each process's event list */
    3329           5 :         nvme_ctrlr_queue_async_event(ctrlr, cpl);
    3330             : 
     3331             :         /* If the ctrlr was removed or is in the destruct state, do not send the AER again */
    3332           5 :         if (ctrlr->is_removed || ctrlr->is_destructed) {
    3333           0 :                 return;
    3334             :         }
    3335             : 
    3336             :         /*
    3337             :          * Repost another asynchronous event request to replace the one
    3338             :          *  that just completed.
    3339             :          */
    3340           5 :         if (nvme_ctrlr_construct_and_submit_aer(ctrlr, aer)) {
    3341             :                 /*
    3342             :                  * We can't do anything to recover from a failure here,
     3343             :                  * so just log an error message and leave the AER unsubmitted.
    3344             :                  */
    3345           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "resubmitting AER failed!\n");
    3346           0 :         }
    3347           5 : }
    3348             : 
    3349             : static int
    3350          24 : nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
    3351             :                                     struct nvme_async_event_request *aer)
    3352             : {
    3353             :         struct nvme_request *req;
    3354             : 
    3355          24 :         aer->ctrlr = ctrlr;
    3356          24 :         req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_async_event_cb, aer);
    3357          24 :         aer->req = req;
    3358          24 :         if (req == NULL) {
    3359           0 :                 return -1;
    3360             :         }
    3361             : 
    3362          24 :         req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
    3363          24 :         return nvme_ctrlr_submit_admin_request(ctrlr, req);
    3364          24 : }
    3365             : 
    3366             : static void
    3367          19 : nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
    3368             : {
    3369             :         struct nvme_async_event_request         *aer;
    3370             :         int                                     rc;
    3371             :         uint32_t                                i;
    3372          19 :         struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)arg;
    3373             : 
    3374          19 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3375           0 :                 NVME_CTRLR_NOTICELOG(ctrlr, "nvme_ctrlr_configure_aer failed!\n");
    3376           0 :                 ctrlr->num_aers = 0;
    3377           0 :         } else {
    3378             :                 /* aerl is a zero-based value, so we need to add 1 here. */
    3379          19 :                 ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
    3380             :         }
    3381             : 
    3382          38 :         for (i = 0; i < ctrlr->num_aers; i++) {
    3383          19 :                 aer = &ctrlr->aer[i];
    3384          19 :                 rc = nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
    3385          19 :                 if (rc) {
    3386           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_construct_and_submit_aer failed!\n");
    3387           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3388           0 :                         return;
    3389             :                 }
    3390          19 :         }
    3391          19 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, ctrlr->opts.admin_timeout_ms);
    3392          19 : }
    3393             : 
    3394             : static int
    3395          19 : nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
    3396             : {
    3397             :         union spdk_nvme_feat_async_event_configuration  config;
    3398             :         int                                             rc;
    3399             : 
    3400          19 :         config.raw = 0;
    3401             : 
    3402          19 :         if (spdk_nvme_ctrlr_is_discovery(ctrlr)) {
    3403           0 :                 config.bits.discovery_log_change_notice = 1;
    3404           0 :         } else {
    3405          19 :                 config.bits.crit_warn.bits.available_spare = 1;
    3406          19 :                 config.bits.crit_warn.bits.temperature = 1;
    3407          19 :                 config.bits.crit_warn.bits.device_reliability = 1;
    3408          19 :                 config.bits.crit_warn.bits.read_only = 1;
    3409          19 :                 config.bits.crit_warn.bits.volatile_memory_backup = 1;
    3410             : 
    3411          19 :                 if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 2, 0)) {
    3412           4 :                         if (ctrlr->cdata.oaes.ns_attribute_notices) {
    3413           0 :                                 config.bits.ns_attr_notice = 1;
    3414           0 :                         }
    3415           4 :                         if (ctrlr->cdata.oaes.fw_activation_notices) {
    3416           0 :                                 config.bits.fw_activation_notice = 1;
    3417           0 :                         }
    3418           4 :                         if (ctrlr->cdata.oaes.ana_change_notices) {
    3419           0 :                                 config.bits.ana_change_notice = 1;
    3420           0 :                         }
    3421           4 :                 }
    3422          19 :                 if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) && ctrlr->cdata.lpa.telemetry) {
    3423           0 :                         config.bits.telemetry_log_notice = 1;
    3424           0 :                 }
    3425             :         }
    3426             : 
    3427          38 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
    3428          19 :                              ctrlr->opts.admin_timeout_ms);
    3429             : 
    3430          38 :         rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
    3431             :                         nvme_ctrlr_configure_aer_done,
    3432          19 :                         ctrlr);
    3433          19 :         if (rc != 0) {
    3434           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3435           0 :                 return rc;
    3436             :         }
    3437             : 
    3438          19 :         return 0;
    3439          19 : }
    3440             : 
    3441             : struct spdk_nvme_ctrlr_process *
    3442          61 : nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr, pid_t pid)
    3443             : {
    3444             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3445             : 
    3446          61 :         TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
    3447          12 :                 if (active_proc->pid == pid) {
    3448          12 :                         return active_proc;
    3449             :                 }
    3450           0 :         }
    3451             : 
    3452          49 :         return NULL;
    3453          61 : }
    3454             : 
    3455             : struct spdk_nvme_ctrlr_process *
    3456          57 : nvme_ctrlr_get_current_process(struct spdk_nvme_ctrlr *ctrlr)
    3457             : {
    3458          57 :         return nvme_ctrlr_get_process(ctrlr, getpid());
    3459             : }
    3460             : 
    3461             : /**
    3462             :  * This function will be called when a process is using the controller.
    3463             :  *  1. For the primary process, it is called when constructing the controller.
     3464             :  *  2. For the secondary process, it is called when probing the controller.
     3465             :  * Note: this checks whether the process has already been added.
    3466             :  */
    3467             : int
    3468           4 : nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
    3469             : {
    3470             :         struct spdk_nvme_ctrlr_process  *ctrlr_proc;
    3471           4 :         pid_t                           pid = getpid();
    3472             : 
    3473             :         /* Check whether the process is already added or not */
    3474           4 :         if (nvme_ctrlr_get_process(ctrlr, pid)) {
    3475           0 :                 return 0;
    3476             :         }
    3477             : 
    3478             :         /* Initialize the per process properties for this ctrlr */
    3479           4 :         ctrlr_proc = spdk_zmalloc(sizeof(struct spdk_nvme_ctrlr_process),
    3480             :                                   64, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
    3481           4 :         if (ctrlr_proc == NULL) {
    3482           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "failed to allocate memory to track the process props\n");
    3483             : 
    3484           0 :                 return -1;
    3485             :         }
    3486             : 
    3487           4 :         ctrlr_proc->is_primary = spdk_process_is_primary();
    3488           4 :         ctrlr_proc->pid = pid;
    3489           4 :         STAILQ_INIT(&ctrlr_proc->active_reqs);
    3490           4 :         ctrlr_proc->devhandle = devhandle;
    3491           4 :         ctrlr_proc->ref = 0;
    3492           4 :         TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
    3493           4 :         STAILQ_INIT(&ctrlr_proc->async_events);
    3494             : 
    3495           4 :         TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
    3496             : 
    3497           4 :         return 0;
    3498           4 : }
    3499             : 
    3500             : /**
    3501             :  * This function will be called when the process detaches the controller.
    3502             :  * Note: the ctrlr_lock must be held when calling this function.
    3503             :  */
    3504             : static void
    3505           1 : nvme_ctrlr_remove_process(struct spdk_nvme_ctrlr *ctrlr,
    3506             :                           struct spdk_nvme_ctrlr_process *proc)
    3507             : {
    3508             :         struct spdk_nvme_qpair  *qpair, *tmp_qpair;
    3509             : 
    3510           1 :         assert(STAILQ_EMPTY(&proc->active_reqs));
    3511             : 
    3512           1 :         TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
    3513           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    3514           0 :         }
    3515             : 
    3516           1 :         TAILQ_REMOVE(&ctrlr->active_procs, proc, tailq);
    3517             : 
    3518           1 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    3519           1 :                 spdk_pci_device_detach(proc->devhandle);
    3520           1 :         }
    3521             : 
    3522           1 :         spdk_free(proc);
    3523           1 : }
    3524             : 
    3525             : /**
     3526             :  * This function will be called when a process has exited unexpectedly,
     3527             :  *  in order to free any incomplete nvme requests, allocated IO qpairs,
     3528             :  *  and allocated memory.
    3529             :  * Note: the ctrlr_lock must be held when calling this function.
    3530             :  */
    3531             : static void
    3532           0 : nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
    3533             : {
    3534             :         struct nvme_request     *req, *tmp_req;
    3535             :         struct spdk_nvme_qpair  *qpair, *tmp_qpair;
    3536             :         struct spdk_nvme_ctrlr_aer_completion *event;
    3537             : 
    3538           0 :         STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
    3539           0 :                 STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
    3540             : 
    3541           0 :                 assert(req->pid == proc->pid);
    3542           0 :                 nvme_cleanup_user_req(req);
    3543           0 :                 nvme_free_request(req);
    3544           0 :         }
    3545             : 
    3546             :         /* Remove async event from each process objects event list */
    3547           0 :         while (!STAILQ_EMPTY(&proc->async_events)) {
    3548           0 :                 event = STAILQ_FIRST(&proc->async_events);
    3549           0 :                 STAILQ_REMOVE_HEAD(&proc->async_events, link);
    3550           0 :                 spdk_free(event);
    3551             :         }
     3552             :         /* Remove any queued async events from this process object's event list */
    3553           0 :         TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
    3554           0 :                 TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);
    3555             : 
    3556             :                 /*
    3557             :                  * The process may have been killed while some qpairs were in their
    3558             :                  *  completion context.  Clear that flag here to allow these IO
    3559             :                  *  qpairs to be deleted.
    3560             :                  */
    3561           0 :                 qpair->in_completion_context = 0;
    3562             : 
    3563           0 :                 qpair->no_deletion_notification_needed = 1;
    3564             : 
    3565           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    3566           0 :         }
    3567             : 
    3568           0 :         spdk_free(proc);
    3569           0 : }
    3570             : 
    3571             : /**
    3572             :  * This function will be called when destructing the controller.
     3573             :  *  1. There are no more admin requests on this controller.
     3574             :  *  2. Clean up any leftover resource allocations when their associated process is gone.
    3575             :  */
    3576             : void
    3577          50 : nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
    3578             : {
    3579             :         struct spdk_nvme_ctrlr_process  *active_proc, *tmp;
    3580             : 
     3581             :         /* Free all the processes' properties and make sure there are no pending admin IOs */
    3582          53 :         TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
    3583           3 :                 TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
    3584             : 
    3585           3 :                 assert(STAILQ_EMPTY(&active_proc->active_reqs));
    3586             : 
    3587           3 :                 spdk_free(active_proc);
    3588           3 :         }
    3589          50 : }
    3590             : 
    3591             : /**
    3592             :  * This function will be called when any other process attaches or
     3593             :  *  detaches the controller in order to clean up those unexpectedly
    3594             :  *  terminated processes.
    3595             :  * Note: the ctrlr_lock must be held when calling this function.
    3596             :  */
    3597             : static int
    3598           0 : nvme_ctrlr_remove_inactive_proc(struct spdk_nvme_ctrlr *ctrlr)
    3599             : {
    3600             :         struct spdk_nvme_ctrlr_process  *active_proc, *tmp;
    3601           0 :         int                             active_proc_count = 0;
    3602             : 
    3603           0 :         TAILQ_FOREACH_SAFE(active_proc, &ctrlr->active_procs, tailq, tmp) {
    3604           0 :                 if ((kill(active_proc->pid, 0) == -1) && (errno == ESRCH)) {
     3605           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "process %d terminated unexpectedly\n", active_proc->pid);
    3606             : 
    3607           0 :                         TAILQ_REMOVE(&ctrlr->active_procs, active_proc, tailq);
    3608             : 
    3609           0 :                         nvme_ctrlr_cleanup_process(active_proc);
    3610           0 :                 } else {
    3611           0 :                         active_proc_count++;
    3612             :                 }
    3613           0 :         }
    3614             : 
    3615           0 :         return active_proc_count;
    3616             : }
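
The kill(pid, 0)/ESRCH probe above is the standard POSIX liveness check: signal 0 performs the existence and permission checks without delivering anything, so ESRCH specifically identifies a process that no longer exists. A self-contained sketch:

        #include <errno.h>
        #include <signal.h>
        #include <stdio.h>
        #include <sys/types.h>
        #include <unistd.h>

        static int
        process_is_alive(pid_t pid)
        {
                /* Only ESRCH means "gone"; EPERM still implies the pid exists. */
                return !(kill(pid, 0) == -1 && errno == ESRCH);
        }

        int
        main(void)
        {
                printf("self alive: %d\n", process_is_alive(getpid()));
                return 0;
        }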
    3617             : 
    3618             : void
    3619           0 : nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
    3620             : {
    3621             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3622             : 
    3623           0 :         nvme_ctrlr_lock(ctrlr);
    3624             : 
    3625           0 :         nvme_ctrlr_remove_inactive_proc(ctrlr);
    3626             : 
    3627           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3628           0 :         if (active_proc) {
    3629           0 :                 active_proc->ref++;
    3630           0 :         }
    3631             : 
    3632           0 :         nvme_ctrlr_unlock(ctrlr);
    3633           0 : }
    3634             : 
    3635             : void
    3636           0 : nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
    3637             : {
    3638             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3639             :         int                             proc_count;
    3640             : 
    3641           0 :         nvme_ctrlr_lock(ctrlr);
    3642             : 
    3643           0 :         proc_count = nvme_ctrlr_remove_inactive_proc(ctrlr);
    3644             : 
    3645           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3646           0 :         if (active_proc) {
    3647           0 :                 active_proc->ref--;
    3648           0 :                 assert(active_proc->ref >= 0);
    3649             : 
    3650             :                 /*
    3651             :                  * The last active process will be removed at the end of
    3652             :                  * the destruction of the controller.
    3653             :                  */
    3654           0 :                 if (active_proc->ref == 0 && proc_count != 1) {
    3655           0 :                         nvme_ctrlr_remove_process(ctrlr, active_proc);
    3656           0 :                 }
    3657           0 :         }
    3658             : 
    3659           0 :         nvme_ctrlr_unlock(ctrlr);
    3660           0 : }
    3661             : 
    3662             : int
    3663           0 : nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
    3664             : {
    3665             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3666           0 :         int                             ref = 0;
    3667             : 
    3668           0 :         nvme_ctrlr_lock(ctrlr);
    3669             : 
    3670           0 :         nvme_ctrlr_remove_inactive_proc(ctrlr);
    3671             : 
    3672           0 :         TAILQ_FOREACH(active_proc, &ctrlr->active_procs, tailq) {
    3673           0 :                 ref += active_proc->ref;
    3674           0 :         }
    3675             : 
    3676           0 :         nvme_ctrlr_unlock(ctrlr);
    3677             : 
    3678           0 :         return ref;
    3679             : }
    3680             : 
    3681             : /**
    3682             :  *  Get the PCI device handle which is only visible to its associated process.
    3683             :  */
    3684             : struct spdk_pci_device *
    3685           0 : nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
    3686             : {
    3687             :         struct spdk_nvme_ctrlr_process  *active_proc;
    3688           0 :         struct spdk_pci_device          *devhandle = NULL;
    3689             : 
    3690           0 :         nvme_ctrlr_lock(ctrlr);
    3691             : 
    3692           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    3693           0 :         if (active_proc) {
    3694           0 :                 devhandle = active_proc->devhandle;
    3695           0 :         }
    3696             : 
    3697           0 :         nvme_ctrlr_unlock(ctrlr);
    3698             : 
    3699           0 :         return devhandle;
    3700             : }
    3701             : 
    3702             : static void
    3703          21 : nvme_ctrlr_process_init_vs_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3704             : {
    3705          21 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3706             : 
    3707          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3708           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the VS register\n");
    3709           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3710           0 :                 return;
    3711             :         }
    3712             : 
    3713          21 :         assert(value <= UINT32_MAX);
    3714          21 :         ctrlr->vs.raw = (uint32_t)value;
    3715          21 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_CAP, NVME_TIMEOUT_INFINITE);
    3716          21 : }
    3717             : 
    3718             : static void
    3719          21 : nvme_ctrlr_process_init_cap_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3720             : {
    3721          21 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3722             : 
    3723          21 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3724           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CAP register\n");
    3725           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3726           0 :                 return;
    3727             :         }
    3728             : 
    3729          21 :         ctrlr->cap.raw = value;
    3730          21 :         nvme_ctrlr_init_cap(ctrlr);
    3731          21 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN, NVME_TIMEOUT_INFINITE);
    3732          21 : }
    3733             : 
    3734             : static void
    3735          22 : nvme_ctrlr_process_init_check_en(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3736             : {
    3737          22 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3738             :         enum nvme_ctrlr_state state;
    3739             : 
    3740          22 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3741           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    3742           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3743           0 :                 return;
    3744             :         }
    3745             : 
    3746          22 :         assert(value <= UINT32_MAX);
    3747          22 :         ctrlr->process_init_cc.raw = (uint32_t)value;
    3748             : 
    3749          22 :         if (ctrlr->process_init_cc.bits.en) {
    3750           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1\n");
    3751           2 :                 state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1;
    3752           2 :         } else {
    3753          20 :                 state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0;
    3754             :         }
    3755             : 
    3756          22 :         nvme_ctrlr_set_state(ctrlr, state, nvme_ctrlr_get_ready_timeout(ctrlr));
    3757          22 : }
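
The branch above keys off CC.EN, bit 0 of the CC register: a controller found already enabled must first be observed ready (CSTS.RDY = 1) before EN may be cleared, while a disabled controller proceeds straight to waiting for RDY = 0. A sketch of the bit test with a hypothetical register value:

        #include <stdint.h>
        #include <stdio.h>

        int
        main(void)
        {
                uint32_t cc_raw = 0x00460001;   /* hypothetical CC: IOSQES/IOCQES set, EN = 1 */

                if (cc_raw & 0x1) {
                        printf("CC.EN = 1 -> wait for CSTS.RDY = 1, then clear EN\n");
                } else {
                        printf("CC.EN = 0 -> wait for CSTS.RDY = 0\n");
                }
                return 0;
        }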
    3758             : 
    3759             : static void
    3760           2 : nvme_ctrlr_process_init_set_en_0(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3761             : {
    3762           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3763             : 
    3764           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3765           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to write the CC register\n");
    3766           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3767           0 :                 return;
    3768             :         }
    3769             : 
    3770             :         /*
    3771             :          * Wait 2.5 seconds before accessing PCI registers.
     3772             :          * Not using sleep() to avoid blocking other controllers' initialization.
    3773             :          */
    3774           2 :         if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) {
    3775           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Applying quirk: delay 2.5 seconds before reading registers\n");
    3776           0 :                 ctrlr->sleep_timeout_tsc = spdk_get_ticks() + (2500 * spdk_get_ticks_hz() / 1000);
    3777           0 :         }
    3778             : 
    3779           4 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3780           2 :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    3781           2 : }
    3782             : 
    3783             : static void
    3784           2 : nvme_ctrlr_process_init_set_en_0_read_cc(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3785             : {
    3786           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3787             :         union spdk_nvme_cc_register cc;
    3788             :         int rc;
    3789             : 
    3790           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3791           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CC register\n");
    3792           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3793           0 :                 return;
    3794             :         }
    3795             : 
    3796           2 :         assert(value <= UINT32_MAX);
    3797           2 :         cc.raw = (uint32_t)value;
    3798           2 :         cc.bits.en = 0;
    3799           2 :         ctrlr->process_init_cc.raw = cc.raw;
    3800             : 
    3801           4 :         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC,
    3802           2 :                              nvme_ctrlr_get_ready_timeout(ctrlr));
    3803             : 
    3804           2 :         rc = nvme_ctrlr_set_cc_async(ctrlr, cc.raw, nvme_ctrlr_process_init_set_en_0, ctrlr);
    3805           2 :         if (rc != 0) {
    3806           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_cc() failed\n");
    3807           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3808           0 :         }
    3809           2 : }
    3810             : 
    3811             : static void
    3812           2 : nvme_ctrlr_process_init_wait_for_ready_1(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3813             : {
    3814           2 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3815             :         union spdk_nvme_csts_register csts;
    3816             : 
    3817           2 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3818             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3819             :                  * temporarily. Allow for this case.
    3820             :                  */
    3821           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3822           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3823           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
    3824             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3825           0 :                 } else {
    3826           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3827           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3828             :                 }
    3829             : 
    3830           0 :                 return;
    3831             :         }
    3832             : 
    3833           2 :         assert(value <= UINT32_MAX);
    3834           2 :         csts.raw = (uint32_t)value;
    3835           2 :         if (csts.bits.rdy == 1 || csts.bits.cfs == 1) {
    3836           4 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0,
    3837           2 :                                      nvme_ctrlr_get_ready_timeout(ctrlr));
    3838           2 :         } else {
    3839           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1 && CSTS.RDY = 0 - waiting for reset to complete\n");
    3840           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,
    3841             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3842             :         }
    3843           2 : }
    3844             : 
    3845             : static void
    3846          22 : nvme_ctrlr_process_init_wait_for_ready_0(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
    3847             : {
    3848          22 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3849             :         union spdk_nvme_csts_register csts;
    3850             : 
    3851          22 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3852             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3853             :                  * temporarily. Allow for this case.
    3854             :                  */
    3855           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3856           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3857           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3858             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3859           0 :                 } else {
    3860           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3861           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3862             :                 }
    3863             : 
    3864           0 :                 return;
    3865             :         }
    3866             : 
    3867          22 :         assert(value <= UINT32_MAX);
    3868          22 :         csts.raw = (uint32_t)value;
    3869          22 :         if (csts.bits.rdy == 0) {
    3870          22 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 0 && CSTS.RDY = 0\n");
    3871          44 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_DISABLED,
    3872          22 :                                      nvme_ctrlr_get_ready_timeout(ctrlr));
    3873          22 :         } else {
    3874           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,
    3875             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3876             :         }
    3877          22 : }
    3878             : 
    3879             : static void
    3880           9 : nvme_ctrlr_process_init_enable_wait_for_ready_1(void *ctx, uint64_t value,
    3881             :                 const struct spdk_nvme_cpl *cpl)
    3882             : {
    3883           9 :         struct spdk_nvme_ctrlr *ctrlr = ctx;
    3884             :         union spdk_nvme_csts_register csts;
    3885             : 
    3886           9 :         if (spdk_nvme_cpl_is_error(cpl)) {
    3887             :                 /* While a device is resetting, it may be unable to service MMIO reads
    3888             :                  * temporarily. Allow for this case.
    3889             :                  */
    3890           0 :                 if (!ctrlr->is_failed && ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE) {
    3891           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Failed to read the CSTS register\n");
    3892           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    3893             :                                              NVME_TIMEOUT_KEEP_EXISTING);
    3894           0 :                 } else {
    3895           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "Failed to read the CSTS register\n");
    3896           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3897             :                 }
    3898             : 
    3899           0 :                 return;
    3900             :         }
    3901             : 
    3902           9 :         assert(value <= UINT32_MAX);
    3903           9 :         csts.raw = (uint32_t)value;
    3904           9 :         if (csts.bits.rdy == 1) {
    3905           9 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "CC.EN = 1 && CSTS.RDY = 1 - controller is ready\n");
    3906             :                 /*
    3907             :                  * The controller has been enabled.
    3908             :                  *  Perform the rest of initialization serially.
    3909             :                  */
    3910          18 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_RESET_ADMIN_QUEUE,
    3911           9 :                                      ctrlr->opts.admin_timeout_ms);
    3912           9 :         } else {
    3913           0 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,
    3914             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    3915             :         }
    3916           9 : }
    3917             : 
    3918             : /**
    3919             :  * This function will be called repeatedly during initialization until the controller is ready.
    3920             :  */
    3921             : int
    3922         446 : nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
    3923             : {
    3924             :         uint32_t ready_timeout_in_ms;
    3925             :         uint64_t ticks;
    3926         446 :         int rc = 0;
    3927             : 
    3928         446 :         ticks = spdk_get_ticks();
    3929             : 
    3930             :         /*
    3931             :          * May need to avoid accessing any register on the target controller for
    3932             :          * a while. Return early without touching the FSM. The sleep_timeout_tsc > 0
    3933             :          * check lets unit tests, where the tick counter may be 0, skip this path.
    3934             :          */
    3935         446 :         if ((ctrlr->sleep_timeout_tsc > 0) &&
    3936           2 :             (ticks <= ctrlr->sleep_timeout_tsc)) {
    3937           1 :                 return 0;
    3938             :         }
    3939         445 :         ctrlr->sleep_timeout_tsc = 0;
    3940             : 
    3941         445 :         ready_timeout_in_ms = nvme_ctrlr_get_ready_timeout(ctrlr);
    3942             : 
    3943             :         /*
    3944             :          * Check if the current initialization step is done or has timed out.
    3945             :          */
    3946         445 :         switch (ctrlr->state) {
    3947             :         case NVME_CTRLR_STATE_INIT_DELAY:
    3948           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, ready_timeout_in_ms);
    3949           1 :                 if (ctrlr->quirks & NVME_QUIRK_DELAY_BEFORE_INIT) {
    3950             :                         /*
    3951             :                          * Controller may need some delay before it's enabled.
    3952             :                          *
    3953             :                          * This is a workaround for an issue where the PCIe-attached NVMe controller
    3954             :                          * is not ready after VFIO reset. We delay the initialization rather than the
    3955             :                          * enabling itself, because this is required only for the very first enabling
    3956             :                          * - directly after a VFIO reset.
    3957             :                          */
    3958           1 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Adding 2 second delay before initializing the controller\n");
    3959           1 :                         ctrlr->sleep_timeout_tsc = ticks + (2000 * spdk_get_ticks_hz() / 1000);
    3960           1 :                 }
    3961           1 :                 break;
    3962             : 
    3963             :         case NVME_CTRLR_STATE_DISCONNECTED:
    3964           0 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    3965           0 :                 break;
    3966             : 
    3967             :         case NVME_CTRLR_STATE_CONNECT_ADMINQ: /* synonymous with NVME_CTRLR_STATE_INIT */
    3968          21 :                 rc = nvme_transport_ctrlr_connect_qpair(ctrlr, ctrlr->adminq);
    3969          21 :                 if (rc == 0) {
    3970          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ,
    3971             :                                              NVME_TIMEOUT_INFINITE);
    3972          21 :                 } else {
    3973           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    3974             :                 }
    3975          21 :                 break;
    3976             : 
    3977             :         case NVME_CTRLR_STATE_WAIT_FOR_CONNECT_ADMINQ:
    3978          21 :                 spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    3979             : 
    3980          21 :                 switch (nvme_qpair_get_state(ctrlr->adminq)) {
    3981             :                 case NVME_QPAIR_CONNECTING:
    3982           0 :                         if (ctrlr->is_failed) {
    3983           0 :                                 nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    3984           0 :                                 break;
    3985             :                         }
    3986             : 
    3987           0 :                         break;
    3988             :                 case NVME_QPAIR_CONNECTED:
    3989          21 :                         nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_ENABLED);
    3990             :                 /* Fall through */
    3991             :                 case NVME_QPAIR_ENABLED:
    3992          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_VS,
    3993             :                                              NVME_TIMEOUT_INFINITE);
    3994             :                         /* Abort any requests that were queued while the adminq was connecting.
    3995             :                          * This avoids stalling the init process during a reset: requests are not
    3996             :                          * resubmitted while the controller is resetting, so subsequent commands
    3997             :                          * would simply queue up behind them.
    3998             :                          */
    3999          21 :                         nvme_qpair_abort_queued_reqs(ctrlr->adminq);
    4000          21 :                         break;
    4001             :                 case NVME_QPAIR_DISCONNECTING:
    4002           0 :                         assert(ctrlr->adminq->async == true);
    4003           0 :                         break;
    4004           0 :                 case NVME_QPAIR_DISCONNECTED:
    4005             :                 /* fallthrough */
    4006             :                 default:
    4007           0 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    4008           0 :                         break;
    4009             :                 }
    4010             : 
    4011          21 :                 break;
    4012             : 
    4013             :         case NVME_CTRLR_STATE_READ_VS:
    4014          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS, NVME_TIMEOUT_INFINITE);
    4015          21 :                 rc = nvme_ctrlr_get_vs_async(ctrlr, nvme_ctrlr_process_init_vs_done, ctrlr);
    4016          21 :                 break;
    4017             : 
    4018             :         case NVME_CTRLR_STATE_READ_CAP:
    4019          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP, NVME_TIMEOUT_INFINITE);
    4020          21 :                 rc = nvme_ctrlr_get_cap_async(ctrlr, nvme_ctrlr_process_init_cap_done, ctrlr);
    4021          21 :                 break;
    4022             : 
    4023             :         case NVME_CTRLR_STATE_CHECK_EN:
    4024             :                 /* Begin the hardware initialization by making sure the controller is disabled. */
    4025          22 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC, ready_timeout_in_ms);
    4026          22 :                 rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_process_init_check_en, ctrlr);
    4027          22 :                 break;
    4028             : 
    4029             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
    4030             :                 /*
    4031             :                  * Controller is currently enabled. We need to disable it to cause a reset.
    4032             :                  *
    4033             :                  * If CC.EN = 1 && CSTS.RDY = 0, the controller is in the process of becoming ready.
    4034             :                  *  Wait for the ready bit to be 1 before disabling the controller.
    4035             :                  */
    4036           2 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
    4037             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    4038           2 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_wait_for_ready_1, ctrlr);
    4039           2 :                 break;
    4040             : 
    4041             :         case NVME_CTRLR_STATE_SET_EN_0:
    4042           2 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Setting CC.EN = 0\n");
    4043           2 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC, ready_timeout_in_ms);
    4044           2 :                 rc = nvme_ctrlr_get_cc_async(ctrlr, nvme_ctrlr_process_init_set_en_0_read_cc, ctrlr);
    4045           2 :                 break;
    4046             : 
    4047             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
    4048          22 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS,
    4049             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    4050          22 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_wait_for_ready_0, ctrlr);
    4051          22 :                 break;
    4052             : 
    4053             :         case NVME_CTRLR_STATE_DISABLED:
    4054          21 :                 if (ctrlr->is_disconnecting) {
    4055           0 :                         NVME_CTRLR_DEBUGLOG(ctrlr, "Ctrlr was disabled.\n");
    4056           0 :                 } else {
    4057          21 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE, ready_timeout_in_ms);
    4058             : 
    4059             :                         /*
    4060             :                          * Delay 100us before setting CC.EN = 1.  Some NVMe SSDs miss CC.EN getting
    4061             :                          *  set to 1 if it is too soon after CSTS.RDY is reported as 0.
    4062             :                          */
    4063          21 :                         spdk_delay_us(100);
    4064             :                 }
    4065          21 :                 break;
    4066             : 
    4067             :         case NVME_CTRLR_STATE_ENABLE:
    4068          21 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Setting CC.EN = 1\n");
    4069          21 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC, ready_timeout_in_ms);
    4070          21 :                 rc = nvme_ctrlr_enable(ctrlr);
    4071          21 :                 if (rc) {
    4072           7 :                         NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr enable failed with error: %d\n", rc);
    4073           7 :                 }
    4074          21 :                 return rc;
    4075             : 
    4076             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
    4077           9 :                 nvme_ctrlr_set_state_quiet(ctrlr, NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS,
    4078             :                                            NVME_TIMEOUT_KEEP_EXISTING);
    4079           9 :                 rc = nvme_ctrlr_get_csts_async(ctrlr, nvme_ctrlr_process_init_enable_wait_for_ready_1,
    4080             :                                                ctrlr);
    4081           9 :                 break;
    4082             : 
    4083             :         case NVME_CTRLR_STATE_RESET_ADMIN_QUEUE:
    4084           9 :                 nvme_transport_qpair_reset(ctrlr->adminq);
    4085           9 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY, NVME_TIMEOUT_INFINITE);
    4086           9 :                 break;
    4087             : 
    4088             :         case NVME_CTRLR_STATE_IDENTIFY:
    4089          16 :                 rc = nvme_ctrlr_identify(ctrlr);
    4090          16 :                 break;
    4091             : 
    4092             :         case NVME_CTRLR_STATE_CONFIGURE_AER:
    4093          19 :                 rc = nvme_ctrlr_configure_aer(ctrlr);
    4094          19 :                 break;
    4095             : 
    4096             :         case NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT:
    4097          22 :                 rc = nvme_ctrlr_set_keep_alive_timeout(ctrlr);
    4098          22 :                 break;
    4099             : 
    4100             :         case NVME_CTRLR_STATE_IDENTIFY_IOCS_SPECIFIC:
    4101          19 :                 rc = nvme_ctrlr_identify_iocs_specific(ctrlr);
    4102          19 :                 break;
    4103             : 
    4104             :         case NVME_CTRLR_STATE_GET_ZNS_CMD_EFFECTS_LOG:
    4105           0 :                 rc = nvme_ctrlr_get_zns_cmd_and_effects_log(ctrlr);
    4106           0 :                 break;
    4107             : 
    4108             :         case NVME_CTRLR_STATE_SET_NUM_QUEUES:
    4109          19 :                 nvme_ctrlr_update_nvmf_ioccsz(ctrlr);
    4110          19 :                 rc = nvme_ctrlr_set_num_queues(ctrlr);
    4111          19 :                 break;
    4112             : 
    4113             :         case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
    4114          24 :                 _nvme_ctrlr_identify_active_ns(ctrlr);
    4115          24 :                 break;
    4116             : 
    4117             :         case NVME_CTRLR_STATE_IDENTIFY_NS:
    4118          14 :                 rc = nvme_ctrlr_identify_namespaces(ctrlr);
    4119          14 :                 break;
    4120             : 
    4121             :         case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
    4122          14 :                 rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
    4123          14 :                 break;
    4124             : 
    4125             :         case NVME_CTRLR_STATE_IDENTIFY_NS_IOCS_SPECIFIC:
    4126          14 :                 rc = nvme_ctrlr_identify_namespaces_iocs_specific(ctrlr);
    4127          14 :                 break;
    4128             : 
    4129             :         case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
    4130          15 :                 rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
    4131          15 :                 break;
    4132             : 
    4133             :         case NVME_CTRLR_STATE_SET_SUPPORTED_INTEL_LOG_PAGES:
    4134           1 :                 rc = nvme_ctrlr_set_intel_support_log_pages(ctrlr);
    4135           1 :                 break;
    4136             : 
    4137             :         case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
    4138          14 :                 nvme_ctrlr_set_supported_features(ctrlr);
    4139          28 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_FEATURE,
    4140          14 :                                      ctrlr->opts.admin_timeout_ms);
    4141          14 :                 break;
    4142             : 
    4143             :         case NVME_CTRLR_STATE_SET_HOST_FEATURE:
    4144          16 :                 rc = nvme_ctrlr_set_host_feature(ctrlr);
    4145          16 :                 break;
    4146             : 
    4147             :         case NVME_CTRLR_STATE_SET_DB_BUF_CFG:
    4148          14 :                 rc = nvme_ctrlr_set_doorbell_buffer_config(ctrlr);
    4149          14 :                 break;
    4150             : 
    4151             :         case NVME_CTRLR_STATE_SET_HOST_ID:
    4152          14 :                 rc = nvme_ctrlr_set_host_id(ctrlr);
    4153          14 :                 break;
    4154             : 
    4155             :         case NVME_CTRLR_STATE_TRANSPORT_READY:
    4156          17 :                 rc = nvme_transport_ctrlr_ready(ctrlr);
    4157          17 :                 if (rc) {
    4158           1 :                         NVME_CTRLR_ERRLOG(ctrlr, "Transport controller ready step failed: rc %d\n", rc);
    4159           1 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ERROR, NVME_TIMEOUT_INFINITE);
    4160           1 :                 } else {
    4161          16 :                         nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_READY, NVME_TIMEOUT_INFINITE);
    4162             :                 }
    4163          17 :                 break;
    4164             : 
    4165             :         case NVME_CTRLR_STATE_READY:
    4166           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Ctrlr already in ready state\n");
    4167           0 :                 return 0;
    4168             : 
    4169             :         case NVME_CTRLR_STATE_ERROR:
    4170           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr is in error state\n");
    4171           0 :                 return -1;
    4172             : 
    4173             :         case NVME_CTRLR_STATE_READ_VS_WAIT_FOR_VS:
    4174             :         case NVME_CTRLR_STATE_READ_CAP_WAIT_FOR_CAP:
    4175             :         case NVME_CTRLR_STATE_CHECK_EN_WAIT_FOR_CC:
    4176             :         case NVME_CTRLR_STATE_SET_EN_0_WAIT_FOR_CC:
    4177             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    4178             :         case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0_WAIT_FOR_CSTS:
    4179             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_CC:
    4180             :         case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1_WAIT_FOR_CSTS:
    4181             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY:
    4182             :         case NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER:
    4183             :         case NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT:
    4184             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_IOCS_SPECIFIC:
    4185             :         case NVME_CTRLR_STATE_WAIT_FOR_GET_ZNS_CMD_EFFECTS_LOG:
    4186             :         case NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES:
    4187             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ACTIVE_NS:
    4188             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS:
    4189             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
    4190             :         case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS_IOCS_SPECIFIC:
    4191             :         case NVME_CTRLR_STATE_WAIT_FOR_SUPPORTED_INTEL_LOG_PAGES:
    4192             :         case NVME_CTRLR_STATE_WAIT_FOR_SET_HOST_FEATURE:
    4193             :         case NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG:
    4194             :         case NVME_CTRLR_STATE_WAIT_FOR_HOST_ID:
    4195             :                 /*
    4196             :                  * nvme_ctrlr_process_init() may be called from the completion context
    4197             :                  * for the admin qpair. Avoid recursive calls for this case.
    4198             :                  */
    4199           0 :                 if (!ctrlr->adminq->in_completion_context) {
    4200           0 :                         spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4201           0 :                 }
    4202           0 :                 break;
    4203             : 
    4204             :         default:
    4205           0 :                 assert(0);
    4206             :                 return -1;
    4207             :         }
    4208             : 
    4209         424 :         if (rc) {
    4210           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "Ctrlr operation failed with error: %d, ctrlr state: %d (%s)\n",
    4211             :                                   rc, ctrlr->state, nvme_ctrlr_state_string(ctrlr->state));
    4212           1 :         }
    4213             : 
    4214             :         /* Note: we use the ticks captured when we entered this function.
    4215             :          * This covers environments where the SPDK process gets swapped out after
    4216             :          * we tried to advance the state but before we check the timeout here.
    4217             :          * It is not normal for this to happen, but harmless to handle it in this
    4218             :          * way.
    4219             :          */
    4220         424 :         if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
    4221           0 :             ticks > ctrlr->state_timeout_tsc) {
    4222           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Initialization timed out in state %d (%s)\n",
    4223             :                                   ctrlr->state, nvme_ctrlr_state_string(ctrlr->state));
    4224           0 :                 return -1;
    4225             :         }
    4226             : 
    4227         424 :         return rc;
    4228         446 : }
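                     : 
                     : /*
                     :  * Editor's sketch (hypothetical caller, not part of this file): the state
                     :  * machine above is driven by polling nvme_ctrlr_process_init() until the
                     :  * controller leaves the initializing states, roughly:
                     :  *
                     :  *     while (ctrlr->state != NVME_CTRLR_STATE_READY) {
                     :  *             if (nvme_ctrlr_process_init(ctrlr) != 0) {
                     :  *                     break;  // init failed; destruct the controller
                     :  *             }
                     :  *     }
                     :  *
                     :  * The timeout check at the bottom of the function converts a stuck state
                     :  * into a -1 return, so the loop above also terminates on timeout.
                     :  */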
    4229             : 
    4230             : int
    4231          47 : nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx)
    4232             : {
    4233             :         pthread_mutexattr_t attr;
    4234          47 :         int rc = 0;
    4235             : 
    4236          47 :         if (pthread_mutexattr_init(&attr)) {
    4237           0 :                 return -1;
    4238             :         }
    4239          47 :         if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) ||
    4240             : #ifndef __FreeBSD__
    4241             :             pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
    4242             :             pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
    4243             : #endif
    4244          47 :             pthread_mutex_init(mtx, &attr)) {
    4245           0 :                 rc = -1;
    4246           0 :         }
    4247          47 :         pthread_mutexattr_destroy(&attr);
    4248          47 :         return rc;
    4249          47 : }
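                     : 
                     : /*
                     :  * Editor's note: these attribute choices support SPDK's multi-process mode.
                     :  * PTHREAD_PROCESS_SHARED lets the mutex live in shared memory and be taken
                     :  * by secondary processes, PTHREAD_MUTEX_ROBUST lets a surviving process
                     :  * recover the lock if its holder dies, and PTHREAD_MUTEX_RECURSIVE allows
                     :  * re-entry from nested controller calls. The robust and process-shared
                     :  * attributes are skipped on FreeBSD, hence the #ifndef above.
                     :  */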
    4250             : 
    4251             : int
    4252          47 : nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
    4253             : {
    4254             :         int rc;
    4255             : 
    4256          47 :         if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
    4257           1 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT_DELAY, NVME_TIMEOUT_INFINITE);
    4258           1 :         } else {
    4259          46 :                 nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_INIT, NVME_TIMEOUT_INFINITE);
    4260             :         }
    4261             : 
    4262          47 :         if (ctrlr->opts.admin_queue_size > SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES) {
    4263           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "admin_queue_size %u exceeds the max defined by the NVMe spec; using the max value\n",
    4264             :                                   ctrlr->opts.admin_queue_size);
    4265           0 :                 ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MAX_ENTRIES;
    4266           0 :         }
    4267             : 
    4268          47 :         if (ctrlr->quirks & NVME_QUIRK_MINIMUM_ADMIN_QUEUE_SIZE &&
    4269           0 :             (ctrlr->opts.admin_queue_size % SPDK_NVME_ADMIN_QUEUE_QUIRK_ENTRIES_MULTIPLE) != 0) {
    4270           0 :                 NVME_CTRLR_ERRLOG(ctrlr,
    4271             :                                   "admin_queue_size %u is invalid for this NVMe device; adjusting to the next multiple\n",
    4272             :                                   ctrlr->opts.admin_queue_size);
    4273           0 :                 ctrlr->opts.admin_queue_size = SPDK_ALIGN_CEIL(ctrlr->opts.admin_queue_size,
    4274             :                                                SPDK_NVME_ADMIN_QUEUE_QUIRK_ENTRIES_MULTIPLE);
    4275           0 :         }
    4276             : 
    4277          47 :         if (ctrlr->opts.admin_queue_size < SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES) {
    4278          26 :                 NVME_CTRLR_ERRLOG(ctrlr,
    4279             :                                   "admin_queue_size %u is less than the minimum defined by the NVMe spec; using the min value\n",
    4280             :                                   ctrlr->opts.admin_queue_size);
    4281          26 :                 ctrlr->opts.admin_queue_size = SPDK_NVME_ADMIN_QUEUE_MIN_ENTRIES;
    4282          26 :         }
    4283             : 
    4284          47 :         ctrlr->flags = 0;
    4285          47 :         ctrlr->free_io_qids = NULL;
    4286          47 :         ctrlr->is_resetting = false;
    4287          47 :         ctrlr->is_failed = false;
    4288          47 :         ctrlr->is_destructed = false;
    4289             : 
    4290          47 :         TAILQ_INIT(&ctrlr->active_io_qpairs);
    4291          47 :         STAILQ_INIT(&ctrlr->queued_aborts);
    4292          47 :         ctrlr->outstanding_aborts = 0;
    4293             : 
    4294          47 :         ctrlr->ana_log_page = NULL;
    4295          47 :         ctrlr->ana_log_page_size = 0;
    4296             : 
    4297          47 :         rc = nvme_robust_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
    4298          47 :         if (rc != 0) {
    4299           0 :                 return rc;
    4300             :         }
    4301             : 
    4302          47 :         TAILQ_INIT(&ctrlr->active_procs);
    4303          47 :         STAILQ_INIT(&ctrlr->register_operations);
    4304             : 
    4305          47 :         RB_INIT(&ctrlr->ns);
    4306             : 
    4307          47 :         return rc;
    4308          47 : }
    4309             : 
    4310             : static void
    4311          21 : nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr)
    4312             : {
    4313          21 :         if (ctrlr->cap.bits.ams & SPDK_NVME_CAP_AMS_WRR) {
    4314           5 :                 ctrlr->flags |= SPDK_NVME_CTRLR_WRR_SUPPORTED;
    4315           5 :         }
    4316             : 
    4317          21 :         ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
    4318             : 
    4319             :         /* For now, always select page_size == min_page_size. */
    4320          21 :         ctrlr->page_size = ctrlr->min_page_size;
    4321             : 
    4322          21 :         ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
    4323          21 :         ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, MAX_IO_QUEUE_ENTRIES);
    4324          21 :         if (ctrlr->quirks & NVME_QUIRK_MINIMUM_IO_QUEUE_SIZE &&
    4325           0 :             ctrlr->opts.io_queue_size == DEFAULT_IO_QUEUE_SIZE) {
    4326             :                 /* If the user specifically set an IO queue size different from the
    4327             :                  * default, use that value.  Otherwise overwrite with the quirked value.
    4328             :                  * This allows this quirk to be overridden when necessary.
    4329             :                  * However, cap.mqes still needs to be respected.
    4330             :                  */
    4331           0 :                 ctrlr->opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE_FOR_QUIRK;
    4332           0 :         }
    4333          21 :         ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
    4334             : 
    4335          21 :         ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
    4336          21 : }
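                     : 
                     : /*
                     :  * Editor's note, with a worked example: CAP.MPSMIN encodes the minimum
                     :  * memory page size as 2^(12 + MPSMIN), so MPSMIN = 0 yields
                     :  * 1u << 12 = 4096 bytes (the common case) and MPSMIN = 4 would yield
                     :  * 64 KiB. CAP.MQES is a 0's-based count, which is why io_queue_size is
                     :  * clamped to mqes + 1: a raw MQES of 1023 means 1024 queue entries.
                     :  */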
    4337             : 
    4338             : void
    4339          47 : nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
    4340             : {
    4341             :         int rc;
    4342             : 
    4343          47 :         if (ctrlr->lock_depth > 0) {
    4344           0 :                 SPDK_ERRLOG("lock currently held (depth=%d)!\n", ctrlr->lock_depth);
    4345           0 :                 assert(false);
    4346             :         }
    4347             : 
    4348          47 :         rc = pthread_mutex_destroy(&ctrlr->ctrlr_lock);
    4349          47 :         if (rc) {
    4350           0 :                 SPDK_ERRLOG("could not destroy ctrlr_lock: %s\n", spdk_strerror(rc));
    4351           0 :                 assert(false);
    4352             :         }
    4353             : 
    4354          47 :         nvme_ctrlr_free_processes(ctrlr);
    4355          47 : }
    4356             : 
    4357             : void
    4358          47 : nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
    4359             :                           struct nvme_ctrlr_detach_ctx *ctx)
    4360             : {
    4361             :         struct spdk_nvme_qpair *qpair, *tmp;
    4362             : 
    4363          47 :         NVME_CTRLR_DEBUGLOG(ctrlr, "Prepare to destruct the controller\n");
    4364             : 
    4365          47 :         ctrlr->prepare_for_reset = false;
    4366          47 :         ctrlr->is_destructed = true;
    4367             : 
    4368          47 :         spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4369             : 
    4370          47 :         nvme_ctrlr_abort_queued_aborts(ctrlr);
    4371          47 :         nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
    4372             : 
    4373          47 :         TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
    4374           0 :                 spdk_nvme_ctrlr_free_io_qpair(qpair);
    4375           0 :         }
    4376             : 
    4377          47 :         nvme_ctrlr_free_doorbell_buffer(ctrlr);
    4378          47 :         nvme_ctrlr_free_iocs_specific_data(ctrlr);
    4379             : 
    4380          47 :         nvme_ctrlr_shutdown_async(ctrlr, ctx);
    4381          47 : }
    4382             : 
    4383             : int
    4384          86 : nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
    4385             :                                struct nvme_ctrlr_detach_ctx *ctx)
    4386             : {
    4387             :         struct spdk_nvme_ns *ns, *tmp_ns;
    4388          86 :         int rc = 0;
    4389             : 
    4390          86 :         if (!ctx->shutdown_complete) {
    4391          78 :                 rc = nvme_ctrlr_shutdown_poll_async(ctrlr, ctx);
    4392          78 :                 if (rc == -EAGAIN) {
    4393          39 :                         return -EAGAIN;
    4394             :                 }
    4395             :                 /* Destruct ctrlr forcefully for any other error. */
    4396          39 :         }
    4397             : 
    4398          47 :         if (ctx->cb_fn) {
    4399           0 :                 ctx->cb_fn(ctrlr);
    4400           0 :         }
    4401             : 
    4402          47 :         nvme_transport_ctrlr_disconnect_qpair(ctrlr, ctrlr->adminq);
    4403             : 
    4404        7733 :         RB_FOREACH_SAFE(ns, nvme_ns_tree, &ctrlr->ns, tmp_ns) {
    4405        7686 :                 nvme_ctrlr_destruct_namespace(ctrlr, ns->id);
    4406        7686 :                 RB_REMOVE(nvme_ns_tree, &ctrlr->ns, ns);
    4407        7686 :                 spdk_free(ns);
    4408        7686 :         }
    4409             : 
    4410          47 :         ctrlr->active_ns_count = 0;
    4411             : 
    4412          47 :         spdk_bit_array_free(&ctrlr->free_io_qids);
    4413             : 
    4414          47 :         free(ctrlr->ana_log_page);
    4415          47 :         free(ctrlr->copied_ana_desc);
    4416          47 :         ctrlr->ana_log_page = NULL;
    4417          47 :         ctrlr->copied_ana_desc = NULL;
    4418          47 :         ctrlr->ana_log_page_size = 0;
    4419             : 
    4420          47 :         nvme_transport_ctrlr_destruct(ctrlr);
    4421             : 
    4422          47 :         return rc;
    4423          86 : }
    4424             : 
    4425             : void
    4426          47 : nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
    4427             : {
    4428          47 :         struct nvme_ctrlr_detach_ctx ctx = { .ctrlr = ctrlr };
    4429             :         int rc;
    4430             : 
    4431          47 :         nvme_ctrlr_destruct_async(ctrlr, &ctx);
    4432             : 
    4433          86 :         while (1) {
    4434          86 :                 rc = nvme_ctrlr_destruct_poll_async(ctrlr, &ctx);
    4435          86 :                 if (rc != -EAGAIN) {
    4436          47 :                         break;
    4437             :                 }
    4438          39 :                 nvme_delay(1000);
    4439             :         }
    4440          47 : }
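                     : 
                     : /*
                     :  * Editor's note: the synchronous wrapper above doubles as a usage example
                     :  * for the async pair: call nvme_ctrlr_destruct_async() once, then poll
                     :  * nvme_ctrlr_destruct_poll_async() until it returns something other than
                     :  * -EAGAIN, delaying between polls (1 ms here).
                     :  */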
    4441             : 
    4442             : int
    4443          24 : nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
    4444             :                                 struct nvme_request *req)
    4445             : {
    4446          24 :         return nvme_qpair_submit_request(ctrlr->adminq, req);
    4447             : }
    4448             : 
    4449             : static void
    4450           0 : nvme_keep_alive_completion(void *cb_ctx, const struct spdk_nvme_cpl *cpl)
    4451             : {
    4452             :         /* Do nothing */
    4453           0 : }
    4454             : 
    4455             : /*
    4456             :  * Check if we need to send a Keep Alive command.
    4457             :  * Caller must hold ctrlr->ctrlr_lock.
    4458             :  */
    4459             : static int
    4460           0 : nvme_ctrlr_keep_alive(struct spdk_nvme_ctrlr *ctrlr)
    4461             : {
    4462             :         uint64_t now;
    4463             :         struct nvme_request *req;
    4464             :         struct spdk_nvme_cmd *cmd;
    4465           0 :         int rc = 0;
    4466             : 
    4467           0 :         now = spdk_get_ticks();
    4468           0 :         if (now < ctrlr->next_keep_alive_tick) {
    4469           0 :                 return rc;
    4470             :         }
    4471             : 
    4472           0 :         req = nvme_allocate_request_null(ctrlr->adminq, nvme_keep_alive_completion, NULL);
    4473           0 :         if (req == NULL) {
    4474           0 :                 return rc;
    4475             :         }
    4476             : 
    4477           0 :         cmd = &req->cmd;
    4478           0 :         cmd->opc = SPDK_NVME_OPC_KEEP_ALIVE;
    4479             : 
    4480           0 :         rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
    4481           0 :         if (rc != 0) {
    4482           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Submitting Keep Alive failed\n");
    4483           0 :                 rc = -ENXIO;
    4484           0 :         }
    4485             : 
    4486           0 :         ctrlr->next_keep_alive_tick = now + ctrlr->keep_alive_interval_ticks;
    4487           0 :         return rc;
    4488           0 : }
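                     : 
                     : /*
                     :  * Editor's note: next_keep_alive_tick throttles submission so that at most
                     :  * one Keep Alive command goes out per keep_alive_interval_ticks, however
                     :  * often the admin queue is polled. A hypothetical timing sketch, assuming
                     :  * the interval works out to 5 seconds:
                     :  *
                     :  *     interval_ticks = 5 * spdk_get_ticks_hz();
                     :  *     // polls with now < next_keep_alive_tick return 0 without submitting;
                     :  *     // the first poll at or past that tick submits and re-arms the tick.
                     :  */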
    4489             : 
    4490             : int32_t
    4491           1 : spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
    4492             : {
    4493             :         int32_t num_completions;
    4494             :         int32_t rc;
    4495             :         struct spdk_nvme_ctrlr_process  *active_proc;
    4496             : 
    4497           1 :         nvme_ctrlr_lock(ctrlr);
    4498             : 
    4499           1 :         if (ctrlr->keep_alive_interval_ticks) {
    4500           0 :                 rc = nvme_ctrlr_keep_alive(ctrlr);
    4501           0 :                 if (rc) {
    4502           0 :                         nvme_ctrlr_unlock(ctrlr);
    4503           0 :                         return rc;
    4504             :                 }
    4505           0 :         }
    4506             : 
    4507           1 :         rc = nvme_io_msg_process(ctrlr);
    4508           1 :         if (rc < 0) {
    4509           0 :                 nvme_ctrlr_unlock(ctrlr);
    4510           0 :                 return rc;
    4511             :         }
    4512           1 :         num_completions = rc;
    4513             : 
    4514           1 :         rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
    4515             : 
    4516             :         /* Each process has an async list, complete the ones for this process object */
    4517           1 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4518           1 :         if (active_proc) {
    4519           0 :                 nvme_ctrlr_complete_queued_async_events(ctrlr);
    4520           0 :         }
    4521             : 
    4522           1 :         if (rc == -ENXIO && ctrlr->is_disconnecting) {
    4523           1 :                 nvme_ctrlr_disconnect_done(ctrlr);
    4524           1 :         }
    4525             : 
    4526           1 :         nvme_ctrlr_unlock(ctrlr);
    4527             : 
    4528           1 :         if (rc < 0) {
    4529           1 :                 num_completions = rc;
    4530           1 :         } else {
    4531           0 :                 num_completions += rc;
    4532             :         }
    4533             : 
    4534           1 :         return num_completions;
    4535           1 : }
    4536             : 
    4537             : const struct spdk_nvme_ctrlr_data *
    4538           0 : spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
    4539             : {
    4540           0 :         return &ctrlr->cdata;
    4541             : }
    4542             : 
    4543           0 : union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
    4544             : {
    4545             :         union spdk_nvme_csts_register csts;
    4546             : 
    4547           0 :         if (nvme_ctrlr_get_csts(ctrlr, &csts)) {
    4548           0 :                 csts.raw = SPDK_NVME_INVALID_REGISTER_VALUE;
    4549           0 :         }
    4550           0 :         return csts;
    4551             : }
    4552             : 
    4553           0 : union spdk_nvme_cc_register spdk_nvme_ctrlr_get_regs_cc(struct spdk_nvme_ctrlr *ctrlr)
    4554             : {
    4555             :         union spdk_nvme_cc_register cc;
    4556             : 
    4557           0 :         if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
    4558           0 :                 cc.raw = SPDK_NVME_INVALID_REGISTER_VALUE;
    4559           0 :         }
    4560           0 :         return cc;
    4561             : }
    4562             : 
    4563           0 : union spdk_nvme_cap_register spdk_nvme_ctrlr_get_regs_cap(struct spdk_nvme_ctrlr *ctrlr)
    4564             : {
    4565           0 :         return ctrlr->cap;
    4566             : }
    4567             : 
    4568           0 : union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
    4569             : {
    4570           0 :         return ctrlr->vs;
    4571             : }
    4572             : 
    4573           0 : union spdk_nvme_cmbsz_register spdk_nvme_ctrlr_get_regs_cmbsz(struct spdk_nvme_ctrlr *ctrlr)
    4574             : {
    4575             :         union spdk_nvme_cmbsz_register cmbsz;
    4576             : 
    4577           0 :         if (nvme_ctrlr_get_cmbsz(ctrlr, &cmbsz)) {
    4578           0 :                 cmbsz.raw = 0;
    4579           0 :         }
    4580             : 
    4581           0 :         return cmbsz;
    4582             : }
    4583             : 
    4584           0 : union spdk_nvme_pmrcap_register spdk_nvme_ctrlr_get_regs_pmrcap(struct spdk_nvme_ctrlr *ctrlr)
    4585             : {
    4586             :         union spdk_nvme_pmrcap_register pmrcap;
    4587             : 
    4588           0 :         if (nvme_ctrlr_get_pmrcap(ctrlr, &pmrcap)) {
    4589           0 :                 pmrcap.raw = 0;
    4590           0 :         }
    4591             : 
    4592           0 :         return pmrcap;
    4593             : }
    4594             : 
    4595           0 : union spdk_nvme_bpinfo_register spdk_nvme_ctrlr_get_regs_bpinfo(struct spdk_nvme_ctrlr *ctrlr)
    4596             : {
    4597             :         union spdk_nvme_bpinfo_register bpinfo;
    4598             : 
    4599           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    4600           0 :                 bpinfo.raw = 0;
    4601           0 :         }
    4602             : 
    4603           0 :         return bpinfo;
    4604             : }
    4605             : 
    4606             : uint64_t
    4607           0 : spdk_nvme_ctrlr_get_pmrsz(struct spdk_nvme_ctrlr *ctrlr)
    4608             : {
    4609           0 :         return ctrlr->pmr_size;
    4610             : }
    4611             : 
    4612             : uint32_t
    4613           2 : spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
    4614             : {
    4615           2 :         return ctrlr->cdata.nn;
    4616             : }
    4617             : 
    4618             : bool
    4619        9301 : spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4620             : {
    4621             :         struct spdk_nvme_ns tmp, *ns;
    4622             : 
    4623        9301 :         tmp.id = nsid;
    4624        9301 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4625             : 
    4626        9301 :         if (ns != NULL) {
    4627        9209 :                 return ns->active;
    4628             :         }
    4629             : 
    4630          92 :         return false;
    4631        9301 : }
    4632             : 
    4633             : uint32_t
    4634          35 : spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
    4635             : {
    4636             :         struct spdk_nvme_ns *ns;
    4637             : 
    4638          35 :         ns = RB_MIN(nvme_ns_tree, &ctrlr->ns);
    4639          35 :         if (ns == NULL) {
    4640          10 :                 return 0;
    4641             :         }
    4642             : 
    4643        4618 :         while (ns != NULL) {
    4644        4615 :                 if (ns->active) {
    4645          22 :                         return ns->id;
    4646             :                 }
    4647             : 
    4648        4593 :                 ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4649             :         }
    4650             : 
    4651           3 :         return 0;
    4652          35 : }
    4653             : 
    4654             : uint32_t
    4655        4657 : spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
    4656             : {
    4657             :         struct spdk_nvme_ns tmp, *ns;
    4658             : 
    4659        4657 :         tmp.id = prev_nsid;
    4660        4657 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4661        4657 :         if (ns == NULL) {
    4662           5 :                 return 0;
    4663             :         }
    4664             : 
    4665        4652 :         ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4666        6184 :         while (ns != NULL) {
    4667        6164 :                 if (ns->active) {
    4668        4632 :                         return ns->id;
    4669             :                 }
    4670             : 
    4671        1532 :                 ns = RB_NEXT(nvme_ns_tree, &ctrlr->ns, ns);
    4672             :         }
    4673             : 
    4674          20 :         return 0;
    4675        4657 : }
    4676             : 
    4677             : struct spdk_nvme_ns *
    4678       12403 : spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4679             : {
    4680             :         struct spdk_nvme_ns tmp;
    4681             :         struct spdk_nvme_ns *ns;
    4682             : 
    4683       12403 :         if (nsid < 1 || nsid > ctrlr->cdata.nn) {
    4684          18 :                 return NULL;
    4685             :         }
    4686             : 
    4687       12385 :         nvme_ctrlr_lock(ctrlr);
    4688             : 
    4689       12385 :         tmp.id = nsid;
    4690       12385 :         ns = RB_FIND(nvme_ns_tree, &ctrlr->ns, &tmp);
    4691             : 
    4692       12385 :         if (ns == NULL) {
    4693        7687 :                 ns = spdk_zmalloc(sizeof(struct spdk_nvme_ns), 64, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
    4694        7687 :                 if (ns == NULL) {
    4695           0 :                         nvme_ctrlr_unlock(ctrlr);
    4696           0 :                         return NULL;
    4697             :                 }
    4698             : 
    4699        7687 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Namespace %u was added\n", nsid);
    4700        7687 :                 ns->id = nsid;
    4701        7687 :                 RB_INSERT(nvme_ns_tree, &ctrlr->ns, ns);
    4702        7687 :         }
    4703             : 
    4704       12385 :         nvme_ctrlr_unlock(ctrlr);
    4705             : 
    4706       12385 :         return ns;
    4707       12403 : }
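                     : 
                     : /*
                     :  * Editor's note: namespace objects are allocated lazily on first lookup and
                     :  * inserted into the ctrlr->ns red-black tree; spdk_nvme_ctrlr_is_active_ns()
                     :  * above distinguishes allocated from active. The SPDK_MALLOC_SHARE flag
                     :  * places the object in shared memory so secondary processes see the same
                     :  * namespace structures.
                     :  */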
    4708             : 
    4709             : struct spdk_pci_device *
    4710           0 : spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr)
    4711             : {
    4712           0 :         if (ctrlr == NULL) {
    4713           0 :                 return NULL;
    4714             :         }
    4715             : 
    4716           0 :         if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
    4717           0 :                 return NULL;
    4718             :         }
    4719             : 
    4720           0 :         return nvme_ctrlr_proc_get_devhandle(ctrlr);
    4721           0 : }
    4722             : 
    4723             : int32_t
    4724           3 : spdk_nvme_ctrlr_get_numa_id(struct spdk_nvme_ctrlr *ctrlr)
    4725             : {
    4726           3 :         if (ctrlr->numa.id_valid) {
    4727           2 :                 return ctrlr->numa.id;
    4728             :         } else {
    4729           1 :                 return SPDK_ENV_NUMA_ID_ANY;
    4730             :         }
    4731           3 : }
    4732             : 
    4733             : uint16_t
    4734           0 : spdk_nvme_ctrlr_get_id(struct spdk_nvme_ctrlr *ctrlr)
    4735             : {
    4736           0 :         return ctrlr->cntlid;
    4737             : }
    4738             : 
    4739             : uint32_t
    4740           0 : spdk_nvme_ctrlr_get_max_xfer_size(const struct spdk_nvme_ctrlr *ctrlr)
    4741             : {
    4742           0 :         return ctrlr->max_xfer_size;
    4743             : }
    4744             : 
    4745             : uint16_t
    4746           0 : spdk_nvme_ctrlr_get_max_sges(const struct spdk_nvme_ctrlr *ctrlr)
    4747             : {
    4748           0 :         if (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
    4749           0 :                 return ctrlr->max_sges;
    4750             :         } else {
    4751           0 :                 return UINT16_MAX;
    4752             :         }
    4753           0 : }
    4754             : 
    4755             : void
    4756           2 : spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
    4757             :                                       spdk_nvme_aer_cb aer_cb_fn,
    4758             :                                       void *aer_cb_arg)
    4759             : {
    4760             :         struct spdk_nvme_ctrlr_process *active_proc;
    4761             : 
    4762           2 :         nvme_ctrlr_lock(ctrlr);
    4763             : 
    4764           2 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4765           2 :         if (active_proc) {
    4766           2 :                 active_proc->aer_cb_fn = aer_cb_fn;
    4767           2 :                 active_proc->aer_cb_arg = aer_cb_arg;
    4768           2 :         }
    4769             : 
    4770           2 :         nvme_ctrlr_unlock(ctrlr);
    4771           2 : }
    4772             : 
    4773             : void
    4774           0 : spdk_nvme_ctrlr_disable_read_changed_ns_list_log_page(struct spdk_nvme_ctrlr *ctrlr)
    4775             : {
    4776           0 :         ctrlr->opts.disable_read_changed_ns_list_log_page = true;
    4777           0 : }
    4778             : 
    4779             : void
    4780           0 : spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
    4781             :                 uint64_t timeout_io_us, uint64_t timeout_admin_us,
    4782             :                 spdk_nvme_timeout_cb cb_fn, void *cb_arg)
    4783             : {
    4784             :         struct spdk_nvme_ctrlr_process  *active_proc;
    4785             : 
    4786           0 :         nvme_ctrlr_lock(ctrlr);
    4787             : 
    4788           0 :         active_proc = nvme_ctrlr_get_current_process(ctrlr);
    4789           0 :         if (active_proc) {
    4790           0 :                 active_proc->timeout_io_ticks = timeout_io_us * spdk_get_ticks_hz() / 1000000ULL;
    4791           0 :                 active_proc->timeout_admin_ticks = timeout_admin_us * spdk_get_ticks_hz() / 1000000ULL;
    4792           0 :                 active_proc->timeout_cb_fn = cb_fn;
    4793           0 :                 active_proc->timeout_cb_arg = cb_arg;
    4794           0 :         }
    4795             : 
    4796           0 :         ctrlr->timeout_enabled = true;
    4797             : 
    4798           0 :         nvme_ctrlr_unlock(ctrlr);
    4799           0 : }
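                     : 
                     : /*
                     :  * Editor's note, with the arithmetic spelled out: timeouts are stored in
                     :  * TSC ticks, converted as microseconds * ticks_per_second / 10^6. With a
                     :  * hypothetical 2.4 GHz tick source, timeout_io_us = 30000000 (30 s) stores
                     :  * 30000000 * 2400000000 / 1000000 = 72000000000 ticks.
                     :  */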
    4800             : 
    4801             : bool
    4802           8 : spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
    4803             : {
    4804             :         /* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
    4805             :         SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
    4806           8 :         return ctrlr->log_page_supported[log_page];
    4807             : }
    4808             : 
    4809             : bool
    4810           4 : spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
    4811             : {
    4812             :         /* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
    4813             :         SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
    4814           4 :         return ctrlr->feature_supported[feature_code];
    4815             : }
    4816             : 
    4817             : int
    4818           1 : spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    4819             :                           struct spdk_nvme_ctrlr_list *payload)
    4820             : {
    4821             :         struct nvme_completion_poll_status      *status;
    4822             :         struct spdk_nvme_ns                     *ns;
    4823             :         int                                     res;
    4824             : 
    4825           1 :         if (nsid == 0) {
    4826           0 :                 return -EINVAL;
    4827             :         }
    4828             : 
    4829           1 :         status = calloc(1, sizeof(*status));
    4830           1 :         if (!status) {
    4831           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4832           0 :                 return -ENOMEM;
    4833             :         }
    4834             : 
    4835           2 :         res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
    4836           1 :                                        nvme_completion_poll_cb, status);
    4837           1 :         if (res) {
    4838           0 :                 free(status);
    4839           0 :                 return res;
    4840             :         }
    4841           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4842           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_attach_ns failed!\n");
    4843           0 :                 if (!status->timed_out) {
    4844           0 :                         free(status);
    4845           0 :                 }
    4846           0 :                 return -ENXIO;
    4847             :         }
    4848           1 :         free(status);
    4849             : 
    4850           1 :         res = nvme_ctrlr_identify_active_ns(ctrlr);
    4851           1 :         if (res) {
    4852           0 :                 return res;
    4853             :         }
    4854             : 
    4855           1 :         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
    4856           1 :         if (ns == NULL) {
    4857           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_get_ns failed!\n");
    4858           0 :                 return -ENXIO;
    4859             :         }
    4860             : 
    4861           1 :         return nvme_ns_construct(ns, nsid, ctrlr);
    4862           1 : }
    4863             : 
    4864             : int
    4865           1 : spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    4866             :                           struct spdk_nvme_ctrlr_list *payload)
    4867             : {
    4868             :         struct nvme_completion_poll_status      *status;
    4869             :         int                                     res;
    4870             : 
    4871           1 :         if (nsid == 0) {
    4872           0 :                 return -EINVAL;
    4873             :         }
    4874             : 
    4875           1 :         status = calloc(1, sizeof(*status));
    4876           1 :         if (!status) {
    4877           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4878           0 :                 return -ENOMEM;
    4879             :         }
    4880             : 
    4881           2 :         res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
    4882           1 :                                        nvme_completion_poll_cb, status);
    4883           1 :         if (res) {
    4884           0 :                 free(status);
    4885           0 :                 return res;
    4886             :         }
    4887           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4888           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_detach_ns failed!\n");
    4889           0 :                 if (!status->timed_out) {
    4890           0 :                         free(status);
    4891           0 :                 }
    4892           0 :                 return -ENXIO;
    4893             :         }
    4894           1 :         free(status);
    4895             : 
    4896           1 :         return nvme_ctrlr_identify_active_ns(ctrlr);
    4897           1 : }
    4898             : 
    4899             : uint32_t
    4900           1 : spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
    4901             : {
    4902             :         struct nvme_completion_poll_status      *status;
    4903             :         int                                     res;
    4904             :         uint32_t                                nsid;
    4905             : 
    4906           1 :         status = calloc(1, sizeof(*status));
    4907           1 :         if (!status) {
    4908           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4909           0 :                 return 0;
    4910             :         }
    4911             : 
    4912           1 :         res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, status);
    4913           1 :         if (res) {
    4914           0 :                 free(status);
    4915           0 :                 return 0;
    4916             :         }
    4917           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4918           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_create_ns failed!\n");
    4919           0 :                 if (!status->timed_out) {
    4920           0 :                         free(status);
    4921           0 :                 }
    4922           0 :                 return 0;
    4923             :         }
    4924             : 
    4925           1 :         nsid = status->cpl.cdw0;
    4926           1 :         free(status);
    4927             : 
    4928           1 :         assert(nsid > 0);
    4929             : 
    4930             :         /* Return the namespace ID that was created */
    4931           1 :         return nsid;
    4932           1 : }
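
A newly created namespace is inactive until it is attached, so this call is
normally paired with spdk_nvme_ctrlr_attach_ns() above. A sketch of that flow,
with an illustrative size and the current controller as the attach target:

    const struct spdk_nvme_ctrlr_data *cdata = spdk_nvme_ctrlr_get_data(ctrlr);
    struct spdk_nvme_ns_data nsdata = {0};
    struct spdk_nvme_ctrlr_list attach_list = {0};
    uint32_t nsid;

    nsdata.nsze = 1024 * 1024;      /* size in logical blocks (illustrative) */
    nsdata.ncap = nsdata.nsze;

    nsid = spdk_nvme_ctrlr_create_ns(ctrlr, &nsdata);
    if (nsid != 0) {
            attach_list.ctrlr_count = 1;
            attach_list.ctrlr_list[0] = cdata->cntlid;
            if (spdk_nvme_ctrlr_attach_ns(ctrlr, nsid, &attach_list) != 0) {
                    /* namespace exists but is not active on this controller */
            }
    }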
    4933             : 
    4934             : int
    4935           1 : spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
    4936             : {
    4937             :         struct nvme_completion_poll_status      *status;
    4938             :         int                                     res;
    4939             : 
    4940           1 :         if (nsid == 0) {
    4941           0 :                 return -EINVAL;
    4942             :         }
    4943             : 
    4944           1 :         status = calloc(1, sizeof(*status));
    4945           1 :         if (!status) {
    4946           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4947           0 :                 return -ENOMEM;
    4948             :         }
    4949             : 
    4950           1 :         res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, status);
    4951           1 :         if (res) {
    4952           0 :                 free(status);
    4953           0 :                 return res;
    4954             :         }
    4955           1 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4956           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_delete_ns failed!\n");
    4957           0 :                 if (!status->timed_out) {
    4958           0 :                         free(status);
    4959           0 :                 }
    4960           0 :                 return -ENXIO;
    4961             :         }
    4962           1 :         free(status);
    4963             : 
    4964           1 :         return nvme_ctrlr_identify_active_ns(ctrlr);
    4965           1 : }
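
Teardown mirrors the create/attach pair: detach first, then delete. A minimal
sketch, assuming nsid identifies the namespace to remove:

    struct spdk_nvme_ctrlr_list detach_list = {0};

    detach_list.ctrlr_count = 1;
    detach_list.ctrlr_list[0] = spdk_nvme_ctrlr_get_data(ctrlr)->cntlid;
    if (spdk_nvme_ctrlr_detach_ns(ctrlr, nsid, &detach_list) == 0) {
            spdk_nvme_ctrlr_delete_ns(ctrlr, nsid);
    }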
    4966             : 
    4967             : int
    4968           0 : spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
    4969             :                        struct spdk_nvme_format *format)
    4970             : {
    4971             :         struct nvme_completion_poll_status      *status;
    4972             :         int                                     res;
    4973             : 
    4974           0 :         status = calloc(1, sizeof(*status));
    4975           0 :         if (!status) {
    4976           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    4977           0 :                 return -ENOMEM;
    4978             :         }
    4979             : 
    4980           0 :         res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
    4981           0 :                                     status);
    4982           0 :         if (res) {
    4983           0 :                 free(status);
    4984           0 :                 return res;
    4985             :         }
    4986           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    4987           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_format failed!\n");
    4988           0 :                 if (!status->timed_out) {
    4989           0 :                         free(status);
    4990           0 :                 }
    4991           0 :                 return -ENXIO;
    4992             :         }
    4993           0 :         free(status);
    4994             : 
    4995           0 :         return spdk_nvme_ctrlr_reset(ctrlr);
    4996           0 : }
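
A successful format ends in spdk_nvme_ctrlr_reset(), so all qpairs must be
reconnected afterwards. A sketch with an assumed LBA format index:

    struct spdk_nvme_format fmt = {0};

    fmt.lbaf = 0;                                    /* LBA format 0 (assumed) */
    fmt.ses = SPDK_NVME_FMT_NVM_SES_USER_DATA_ERASE; /* secure-erase user data */
    if (spdk_nvme_ctrlr_format(ctrlr, 1 /* nsid */, &fmt) != 0) {
            /* format, or the follow-up controller reset, failed */
    }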
    4997             : 
    4998             : int
    4999           8 : spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, uint32_t size,
    5000             :                                 int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status)
    5001             : {
    5002             :         struct spdk_nvme_fw_commit              fw_commit;
    5003             :         struct nvme_completion_poll_status      *status;
    5004             :         int                                     res;
    5005             :         unsigned int                            size_remaining;
    5006             :         unsigned int                            offset;
    5007             :         unsigned int                            transfer;
    5008             :         uint8_t                                 *p;
    5009             : 
    5010           8 :         if (!completion_status) {
    5011           0 :                 return -EINVAL;
    5012             :         }
    5013           8 :         memset(completion_status, 0, sizeof(struct spdk_nvme_status));
    5014           8 :         if (size % 4) {
    5015           1 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_update_firmware invalid size!\n");
    5016           1 :                 return -1;
    5017             :         }
    5018             : 
    5019             :         /* Only SPDK_NVME_FW_COMMIT_REPLACE_IMG and
    5020             :          * SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG are currently supported.
    5021             :          */
    5022           7 :         if ((commit_action != SPDK_NVME_FW_COMMIT_REPLACE_IMG) &&
    5023           0 :             (commit_action != SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG)) {
    5024           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_update_firmware invalid command!\n");
    5025           0 :                 return -1;
    5026             :         }
    5027             : 
    5028           7 :         status = calloc(1, sizeof(*status));
    5029           7 :         if (!status) {
    5030           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5031           0 :                 return -ENOMEM;
    5032             :         }
    5033             : 
    5034             :         /* Firmware download */
    5035           7 :         size_remaining = size;
    5036           7 :         offset = 0;
    5037           7 :         p = payload;
    5038             : 
    5039          10 :         while (size_remaining > 0) {
    5040           7 :                 transfer = spdk_min(size_remaining, ctrlr->min_page_size);
    5041             : 
    5042           7 :                 memset(status, 0, sizeof(*status));
    5043          14 :                 res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
    5044             :                                                        nvme_completion_poll_cb,
    5045           7 :                                                        status);
    5046           7 :                 if (res) {
    5047           2 :                         free(status);
    5048           2 :                         return res;
    5049             :                 }
    5050             : 
    5051           5 :                 if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5052           2 :                         NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_fw_image_download failed!\n");
    5053           2 :                         if (!status->timed_out) {
    5054           1 :                                 free(status);
    5055           1 :                         }
    5056           2 :                         return -ENXIO;
    5057             :                 }
    5058           3 :                 p += transfer;
    5059           3 :                 offset += transfer;
    5060           3 :                 size_remaining -= transfer;
    5061             :         }
    5062             : 
    5063             :         /* Firmware commit */
    5064           3 :         memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    5065           3 :         fw_commit.fs = slot;
    5066           3 :         fw_commit.ca = commit_action;
    5067             : 
    5068           3 :         memset(status, 0, sizeof(*status));
    5069           6 :         res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
    5070           3 :                                        status);
    5071           3 :         if (res) {
    5072           1 :                 free(status);
    5073           1 :                 return res;
    5074             :         }
    5075             : 
    5076           2 :         res = nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock);
    5077             : 
    5078           2 :         memcpy(completion_status, &status->cpl.status, sizeof(struct spdk_nvme_status));
    5079             : 
    5080           2 :         if (!status->timed_out) {
    5081           2 :                 free(status);
    5082           2 :         }
    5083             : 
    5084           2 :         if (res) {
    5085           1 :                 if (completion_status->sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
    5086           0 :                     completion_status->sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
    5087           1 :                         if (completion_status->sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
    5088           0 :                             completion_status->sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
    5089           0 :                                 NVME_CTRLR_NOTICELOG(ctrlr,
    5090             :                                                      "firmware activation requires a conventional reset to be performed!\n");
    5091           0 :                         } else {
    5092           1 :                                 NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    5093             :                         }
    5094           1 :                         return -ENXIO;
    5095             :                 }
    5096           0 :         }
    5097             : 
    5098           1 :         return spdk_nvme_ctrlr_reset(ctrlr);
    5099           8 : }
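
The download loop above transfers at most min_page_size bytes per command, so
large images simply issue more admin commands; the commit status is reported
through completion_status even on failure. A usage sketch, assuming fw_buf and
fw_size describe a 4-byte-multiple image in DMA-safe memory:

    struct spdk_nvme_status fw_status;
    int rc;

    rc = spdk_nvme_ctrlr_update_firmware(ctrlr, fw_buf, fw_size, 1 /* slot */,
                                         SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG,
                                         &fw_status);
    if (rc != 0) {
            /* fw_status.sct / fw_status.sc hold the NVMe status of the commit */
    }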
    5100             : 
    5101             : int
    5102           0 : spdk_nvme_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
    5103             : {
    5104             :         int rc, size;
    5105             :         union spdk_nvme_cmbsz_register cmbsz;
    5106             : 
    5107           0 :         cmbsz = spdk_nvme_ctrlr_get_regs_cmbsz(ctrlr);
    5108             : 
    5109           0 :         if (cmbsz.bits.rds == 0 || cmbsz.bits.wds == 0) {
    5110           0 :                 return -ENOTSUP;
    5111             :         }
    5112             : 
    5113           0 :         size = cmbsz.bits.sz * (0x1000 << (cmbsz.bits.szu * 4));
    5114             : 
    5115           0 :         nvme_ctrlr_lock(ctrlr);
    5116           0 :         rc = nvme_transport_ctrlr_reserve_cmb(ctrlr);
    5117           0 :         nvme_ctrlr_unlock(ctrlr);
    5118             : 
    5119           0 :         if (rc < 0) {
    5120           0 :                 return rc;
    5121             :         }
    5122             : 
    5123           0 :         return size;
    5124           0 : }
    5125             : 
    5126             : void *
    5127           0 : spdk_nvme_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
    5128             : {
    5129             :         void *buf;
    5130             : 
    5131           0 :         nvme_ctrlr_lock(ctrlr);
    5132           0 :         buf = nvme_transport_ctrlr_map_cmb(ctrlr, size);
    5133           0 :         nvme_ctrlr_unlock(ctrlr);
    5134             : 
    5135           0 :         return buf;
    5136             : }
    5137             : 
    5138             : void
    5139           0 : spdk_nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
    5140             : {
    5141           0 :         nvme_ctrlr_lock(ctrlr);
    5142           0 :         nvme_transport_ctrlr_unmap_cmb(ctrlr);
    5143           0 :         nvme_ctrlr_unlock(ctrlr);
    5144           0 : }
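
A caller reserves the CMB once, maps it to obtain a virtual address, and unmaps
it before releasing the controller. A sketch of that sequence using the three
functions above:

    size_t cmb_len = 0;
    void *cmb;

    if (spdk_nvme_ctrlr_reserve_cmb(ctrlr) >= 0) {
            cmb = spdk_nvme_ctrlr_map_cmb(ctrlr, &cmb_len);
            if (cmb != NULL) {
                    /* ... place queues or data buffers in cmb[0..cmb_len) ... */
                    spdk_nvme_ctrlr_unmap_cmb(ctrlr);
            }
    }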
    5145             : 
    5146             : int
    5147           0 : spdk_nvme_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
    5148             : {
    5149             :         int rc;
    5150             : 
    5151           0 :         nvme_ctrlr_lock(ctrlr);
    5152           0 :         rc = nvme_transport_ctrlr_enable_pmr(ctrlr);
    5153           0 :         nvme_ctrlr_unlock(ctrlr);
    5154             : 
    5155           0 :         return rc;
    5156             : }
    5157             : 
    5158             : int
    5159           0 : spdk_nvme_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
    5160             : {
    5161             :         int rc;
    5162             : 
    5163           0 :         nvme_ctrlr_lock(ctrlr);
    5164           0 :         rc = nvme_transport_ctrlr_disable_pmr(ctrlr);
    5165           0 :         nvme_ctrlr_unlock(ctrlr);
    5166             : 
    5167           0 :         return rc;
    5168             : }
    5169             : 
    5170             : void *
    5171           0 : spdk_nvme_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
    5172             : {
    5173             :         void *buf;
    5174             : 
    5175           0 :         nvme_ctrlr_lock(ctrlr);
    5176           0 :         buf = nvme_transport_ctrlr_map_pmr(ctrlr, size);
    5177           0 :         nvme_ctrlr_unlock(ctrlr);
    5178             : 
    5179           0 :         return buf;
    5180             : }
    5181             : 
    5182             : int
    5183           0 : spdk_nvme_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
    5184             : {
    5185             :         int rc;
    5186             : 
    5187           0 :         nvme_ctrlr_lock(ctrlr);
    5188           0 :         rc = nvme_transport_ctrlr_unmap_pmr(ctrlr);
    5189           0 :         nvme_ctrlr_unlock(ctrlr);
    5190             : 
    5191           0 :         return rc;
    5192             : }
    5193             : 
    5194             : int
    5195           0 : spdk_nvme_ctrlr_read_boot_partition_start(struct spdk_nvme_ctrlr *ctrlr, void *payload,
    5196             :                 uint32_t bprsz, uint32_t bprof, uint32_t bpid)
    5197             : {
    5198             :         union spdk_nvme_bprsel_register bprsel;
    5199             :         union spdk_nvme_bpinfo_register bpinfo;
    5200             :         uint64_t bpmbl, bpmb_size;
    5201             : 
    5202           0 :         if (ctrlr->cap.bits.bps == 0) {
    5203           0 :                 return -ENOTSUP;
    5204             :         }
    5205             : 
    5206           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    5207           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "get bpinfo failed\n");
    5208           0 :                 return -EIO;
    5209             :         }
    5210             : 
    5211           0 :         if (bpinfo.bits.brs == SPDK_NVME_BRS_READ_IN_PROGRESS) {
    5212           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition read already initiated\n");
    5213           0 :                 return -EALREADY;
    5214             :         }
    5215             : 
    5216           0 :         nvme_ctrlr_lock(ctrlr);
    5217             : 
    5218           0 :         bpmb_size = bprsz * 4096;
    5219           0 :         bpmbl = spdk_vtophys(payload, &bpmb_size);
    5220           0 :         if (bpmbl == SPDK_VTOPHYS_ERROR) {
    5221           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_vtophys of bpmbl failed\n");
    5222           0 :                 nvme_ctrlr_unlock(ctrlr);
    5223           0 :                 return -EFAULT;
    5224             :         }
    5225             : 
    5226           0 :         if (bpmb_size != bprsz * 4096) {
    5227           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition buffer is not physically contiguous\n");
    5228           0 :                 nvme_ctrlr_unlock(ctrlr);
    5229           0 :                 return -EFAULT;
    5230             :         }
    5231             : 
    5232           0 :         if (nvme_ctrlr_set_bpmbl(ctrlr, bpmbl)) {
    5233           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_bpmbl() failed\n");
    5234           0 :                 nvme_ctrlr_unlock(ctrlr);
    5235           0 :                 return -EIO;
    5236             :         }
    5237             : 
    5238           0 :         bprsel.bits.bpid = bpid;
    5239           0 :         bprsel.bits.bprof = bprof;
    5240           0 :         bprsel.bits.bprsz = bprsz;
    5241             : 
    5242           0 :         if (nvme_ctrlr_set_bprsel(ctrlr, &bprsel)) {
    5243           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "set_bprsel() failed\n");
    5244           0 :                 nvme_ctrlr_unlock(ctrlr);
    5245           0 :                 return -EIO;
    5246             :         }
    5247             : 
    5248           0 :         nvme_ctrlr_unlock(ctrlr);
    5249           0 :         return 0;
    5250           0 : }
    5251             : 
    5252             : int
    5253           0 : spdk_nvme_ctrlr_read_boot_partition_poll(struct spdk_nvme_ctrlr *ctrlr)
    5254             : {
    5255           0 :         int rc = 0;
    5256             :         union spdk_nvme_bpinfo_register bpinfo;
    5257             : 
    5258           0 :         if (nvme_ctrlr_get_bpinfo(ctrlr, &bpinfo)) {
    5259           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "get bpinfo failed\n");
    5260           0 :                 return -EIO;
    5261             :         }
    5262             : 
    5263           0 :         switch (bpinfo.bits.brs) {
    5264             :         case SPDK_NVME_BRS_NO_READ:
    5265           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Boot Partition read not initiated\n");
    5266           0 :                 rc = -EINVAL;
    5267           0 :                 break;
    5268             :         case SPDK_NVME_BRS_READ_IN_PROGRESS:
    5269           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition read in progress\n");
    5270           0 :                 rc = -EAGAIN;
    5271           0 :                 break;
    5272             :         case SPDK_NVME_BRS_READ_ERROR:
    5273           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Error completing Boot Partition read\n");
    5274           0 :                 rc = -EIO;
    5275           0 :                 break;
    5276             :         case SPDK_NVME_BRS_READ_SUCCESS:
    5277           0 :                 NVME_CTRLR_INFOLOG(ctrlr, "Boot Partition read completed successfully\n");
    5278           0 :                 break;
    5279             :         default:
    5280           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Invalid Boot Partition read status\n");
    5281           0 :                 rc = -EINVAL;
    5282           0 :         }
    5283             : 
    5284           0 :         return rc;
    5285           0 : }
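
As the bprsz * 4096 math above shows, the read size is expressed in 4 KiB
units, and the payload must be physically contiguous pinned memory. A polling
sketch that reads 64 KiB of Boot Partition 0 into an spdk_dma_zmalloc() buffer
(sizes are illustrative):

    void *bp_buf = spdk_dma_zmalloc(16 * 4096, 4096, NULL);
    int rc;

    if (bp_buf != NULL &&
        spdk_nvme_ctrlr_read_boot_partition_start(ctrlr, bp_buf, 16 /* bprsz */,
                                                  0 /* bprof */, 0 /* bpid */) == 0) {
            do {
                    rc = spdk_nvme_ctrlr_read_boot_partition_poll(ctrlr);
            } while (rc == -EAGAIN);
    }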
    5286             : 
    5287             : static void
    5288           0 : nvme_write_boot_partition_cb(void *arg, const struct spdk_nvme_cpl *cpl)
    5289             : {
    5290             :         int res;
    5291           0 :         struct spdk_nvme_ctrlr *ctrlr = arg;
    5292             :         struct spdk_nvme_fw_commit fw_commit;
    5293           0 :         struct spdk_nvme_cpl err_cpl =
    5294             :         {.status = {.sct = SPDK_NVME_SCT_GENERIC, .sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR }};
    5295             : 
    5296           0 :         if (spdk_nvme_cpl_is_error(cpl)) {
    5297           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Write Boot Partition failed\n");
    5298           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, cpl);
    5299           0 :                 return;
    5300             :         }
    5301             : 
    5302           0 :         if (ctrlr->bp_ws == SPDK_NVME_BP_WS_DOWNLOADING) {
    5303           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Downloading at Offset %d Success\n", ctrlr->fw_offset);
    5304           0 :                 ctrlr->fw_payload = (uint8_t *)ctrlr->fw_payload + ctrlr->fw_transfer_size;
    5305           0 :                 ctrlr->fw_offset += ctrlr->fw_transfer_size;
    5306           0 :                 ctrlr->fw_size_remaining -= ctrlr->fw_transfer_size;
    5307           0 :                 ctrlr->fw_transfer_size = spdk_min(ctrlr->fw_size_remaining, ctrlr->min_page_size);
    5308           0 :                 res = nvme_ctrlr_cmd_fw_image_download(ctrlr, ctrlr->fw_transfer_size, ctrlr->fw_offset,
    5309           0 :                                                        ctrlr->fw_payload, nvme_write_boot_partition_cb, ctrlr);
    5310           0 :                 if (res) {
    5311           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_image_download failed!\n");
    5312           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5313           0 :                         return;
    5314             :                 }
    5315             : 
    5316           0 :                 if (ctrlr->fw_transfer_size < ctrlr->min_page_size) {
    5317           0 :                         ctrlr->bp_ws = SPDK_NVME_BP_WS_DOWNLOADED;
    5318           0 :                 }
    5319           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_DOWNLOADED) {
    5320           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Download Success\n");
    5321           0 :                 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    5322           0 :                 fw_commit.bpid = ctrlr->bpid;
    5323           0 :                 fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_BOOT_PARTITION;
    5324           0 :                 res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit,
    5325           0 :                                                nvme_write_boot_partition_cb, ctrlr);
    5326           0 :                 if (res) {
    5327           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    5328           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "commit action: %d\n", fw_commit.ca);
    5329           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5330           0 :                         return;
    5331             :                 }
    5332             : 
    5333           0 :                 ctrlr->bp_ws = SPDK_NVME_BP_WS_REPLACE;
    5334           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_REPLACE) {
    5335           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Replacement Success\n");
    5336           0 :                 memset(&fw_commit, 0, sizeof(struct spdk_nvme_fw_commit));
    5337           0 :                 fw_commit.bpid = ctrlr->bpid;
    5338           0 :                 fw_commit.ca = SPDK_NVME_FW_COMMIT_ACTIVATE_BOOT_PARTITION;
    5339           0 :                 res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit,
    5340           0 :                                                nvme_write_boot_partition_cb, ctrlr);
    5341           0 :                 if (res) {
    5342           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "nvme_ctrlr_cmd_fw_commit failed!\n");
    5343           0 :                         NVME_CTRLR_ERRLOG(ctrlr, "commit action: %d\n", fw_commit.ca);
    5344           0 :                         ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5345           0 :                         return;
    5346             :                 }
    5347             : 
    5348           0 :                 ctrlr->bp_ws = SPDK_NVME_BP_WS_ACTIVATE;
    5349           0 :         } else if (ctrlr->bp_ws == SPDK_NVME_BP_WS_ACTIVATE) {
    5350           0 :                 NVME_CTRLR_DEBUGLOG(ctrlr, "Boot Partition Activation Success\n");
    5351           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, cpl);
    5352           0 :         } else {
    5353           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Invalid Boot Partition write state\n");
    5354           0 :                 ctrlr->bp_write_cb_fn(ctrlr->bp_write_cb_arg, &err_cpl);
    5355           0 :                 return;
    5356             :         }
    5357           0 : }
    5358             : 
    5359             : int
    5360           0 : spdk_nvme_ctrlr_write_boot_partition(struct spdk_nvme_ctrlr *ctrlr,
    5361             :                                      void *payload, uint32_t size, uint32_t bpid,
    5362             :                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    5363             : {
    5364             :         int res;
    5365             : 
    5366           0 :         if (ctrlr->cap.bits.bps == 0) {
    5367           0 :                 return -ENOTSUP;
    5368             :         }
    5369             : 
    5370           0 :         ctrlr->bp_ws = SPDK_NVME_BP_WS_DOWNLOADING;
    5371           0 :         ctrlr->bpid = bpid;
    5372           0 :         ctrlr->bp_write_cb_fn = cb_fn;
    5373           0 :         ctrlr->bp_write_cb_arg = cb_arg;
    5374           0 :         ctrlr->fw_offset = 0;
    5375           0 :         ctrlr->fw_size_remaining = size;
    5376           0 :         ctrlr->fw_payload = payload;
    5377           0 :         ctrlr->fw_transfer_size = spdk_min(ctrlr->fw_size_remaining, ctrlr->min_page_size);
    5378             : 
    5379           0 :         res = nvme_ctrlr_cmd_fw_image_download(ctrlr, ctrlr->fw_transfer_size, ctrlr->fw_offset,
    5380           0 :                                                ctrlr->fw_payload, nvme_write_boot_partition_cb, ctrlr);
    5381             : 
    5382           0 :         return res;
    5383           0 : }
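
The callback above drives a four-stage sequence (DOWNLOADING -> DOWNLOADED ->
REPLACE -> ACTIVATE), so the caller issues only this first call and then polls
the admin queue until its own callback fires. A sketch, assuming image and
image_size describe the Boot Partition image, with a simple done flag:

    static void
    bp_write_done(void *arg, const struct spdk_nvme_cpl *cpl)
    {
            if (spdk_nvme_cpl_is_error(cpl)) {
                    /* log or record the failure */
            }
            *(bool *)arg = true;
    }

    bool done = false;

    if (spdk_nvme_ctrlr_write_boot_partition(ctrlr, image, image_size,
                                             0 /* bpid */, bp_write_done, &done) == 0) {
            while (!done) {
                    spdk_nvme_ctrlr_process_admin_completions(ctrlr);
            }
    }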
    5384             : 
    5385             : bool
    5386          43 : spdk_nvme_ctrlr_is_discovery(struct spdk_nvme_ctrlr *ctrlr)
    5387             : {
    5388          43 :         assert(ctrlr);
    5389             : 
    5390          43 :         return !strncmp(ctrlr->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN,
    5391             :                         strlen(SPDK_NVMF_DISCOVERY_NQN));
    5392             : }
    5393             : 
    5394             : bool
    5395          20 : spdk_nvme_ctrlr_is_fabrics(struct spdk_nvme_ctrlr *ctrlr)
    5396             : {
    5397          20 :         assert(ctrlr);
    5398             : 
    5399          20 :         return spdk_nvme_trtype_is_fabrics(ctrlr->trid.trtype);
    5400             : }
    5401             : 
    5402             : int
    5403           0 : spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
    5404             :                                  uint16_t spsp, uint8_t nssf, void *payload, size_t size)
    5405             : {
    5406             :         struct nvme_completion_poll_status      *status;
    5407             :         int                                     res;
    5408             : 
    5409           0 :         status = calloc(1, sizeof(*status));
    5410           0 :         if (!status) {
    5411           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5412           0 :                 return -ENOMEM;
    5413             :         }
    5414             : 
    5415           0 :         res = spdk_nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
    5416           0 :                         nvme_completion_poll_cb, status);
    5417           0 :         if (res) {
    5418           0 :                 free(status);
    5419           0 :                 return res;
    5420             :         }
    5421           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5422           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_security_receive failed!\n");
    5423           0 :                 if (!status->timed_out) {
    5424           0 :                         free(status);
    5425           0 :                 }
    5426           0 :                 return -ENXIO;
    5427             :         }
    5428           0 :         free(status);
    5429             : 
    5430           0 :         return 0;
    5431           0 : }
    5432             : 
    5433             : int
    5434           0 : spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
    5435             :                               uint16_t spsp, uint8_t nssf, void *payload, size_t size)
    5436             : {
    5437             :         struct nvme_completion_poll_status      *status;
    5438             :         int                                     res;
    5439             : 
    5440           0 :         status = calloc(1, sizeof(*status));
    5441           0 :         if (!status) {
    5442           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "Failed to allocate status tracker\n");
    5443           0 :                 return -ENOMEM;
    5444             :         }
    5445             : 
    5446           0 :         res = spdk_nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size,
    5447             :                                                 nvme_completion_poll_cb,
    5448           0 :                                                 status);
    5449           0 :         if (res) {
    5450           0 :                 free(status);
    5451           0 :                 return res;
    5452             :         }
    5453           0 :         if (nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
    5454           0 :                 NVME_CTRLR_ERRLOG(ctrlr, "spdk_nvme_ctrlr_cmd_security_send failed!\n");
    5455           0 :                 if (!status->timed_out) {
    5456           0 :                         free(status);
    5457           0 :                 }
    5458           0 :                 return -ENXIO;
    5459             :         }
    5460             : 
    5461           0 :         free(status);
    5462             : 
    5463           0 :         return 0;
    5464           0 : }
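
These wrap the admin Security Send/Receive commands; security protocol 0x00
with SPSP 0 returns the list of protocols the controller supports. A sketch
using a DMA-safe buffer (size and alignment are illustrative):

    uint8_t *sec_buf = spdk_dma_zmalloc(4096, 4096, NULL);

    if (sec_buf != NULL &&
        spdk_nvme_ctrlr_security_receive(ctrlr, 0x00 /* secp */, 0 /* spsp */,
                                         0 /* nssf */, sec_buf, 4096) == 0) {
            /* sec_buf now holds the supported security protocol list */
    }
    spdk_dma_free(sec_buf);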
    5465             : 
    5466             : uint64_t
    5467           1 : spdk_nvme_ctrlr_get_flags(struct spdk_nvme_ctrlr *ctrlr)
    5468             : {
    5469           1 :         return ctrlr->flags;
    5470             : }
    5471             : 
    5472             : const struct spdk_nvme_transport_id *
    5473           0 : spdk_nvme_ctrlr_get_transport_id(struct spdk_nvme_ctrlr *ctrlr)
    5474             : {
    5475           0 :         return &ctrlr->trid;
    5476             : }
    5477             : 
    5478             : int32_t
    5479          17 : spdk_nvme_ctrlr_alloc_qid(struct spdk_nvme_ctrlr *ctrlr)
    5480             : {
    5481             :         uint32_t qid;
    5482             : 
    5483          17 :         assert(ctrlr->free_io_qids);
    5484          17 :         nvme_ctrlr_lock(ctrlr);
    5485          17 :         qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
    5486          17 :         if (qid > ctrlr->opts.num_io_queues) {
    5487           2 :                 NVME_CTRLR_ERRLOG(ctrlr, "No free I/O queue IDs\n");
    5488           2 :                 nvme_ctrlr_unlock(ctrlr);
    5489           2 :                 return -1;
    5490             :         }
    5491             : 
    5492          15 :         spdk_bit_array_clear(ctrlr->free_io_qids, qid);
    5493          15 :         nvme_ctrlr_unlock(ctrlr);
    5494          15 :         return qid;
    5495          17 : }
    5496             : 
    5497             : void
    5498          64 : spdk_nvme_ctrlr_free_qid(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid)
    5499             : {
    5500          64 :         assert(qid <= ctrlr->opts.num_io_queues);
    5501             : 
    5502          64 :         nvme_ctrlr_lock(ctrlr);
    5503             : 
    5504          64 :         if (spdk_likely(ctrlr->free_io_qids)) {
    5505          64 :                 spdk_bit_array_set(ctrlr->free_io_qids, qid);
    5506          64 :         }
    5507             : 
    5508          64 :         nvme_ctrlr_unlock(ctrlr);
    5509          64 : }
    5510             : 
    5511             : int
    5512           2 : spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
    5513             :                                    struct spdk_memory_domain **domains, int array_size)
    5514             : {
    5515           2 :         return nvme_transport_ctrlr_get_memory_domains(ctrlr, domains, array_size);
    5516             : }
    5517             : 
    5518             : int
    5519           0 : spdk_nvme_ctrlr_authenticate(struct spdk_nvme_ctrlr *ctrlr,
    5520             :                              spdk_nvme_authenticate_cb cb_fn, void *cb_ctx)
    5521             : {
    5522           0 :         return spdk_nvme_qpair_authenticate(ctrlr->adminq, cb_fn, cb_ctx);
    5523             : }

Generated by: LCOV version 1.15