Bug Summary

File: nvmf.c
Warning: line 801, column 3
Access to field 'tgt' results in a dereference of a null pointer (loaded from variable 'referral')

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-redhat-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name nvmf.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu icelake-server -target-feature +prfchw -target-feature -cldemote -target-feature +avx -target-feature +aes -target-feature +sahf -target-feature +pclmul -target-feature -xop -target-feature +crc32 -target-feature +xsaves -target-feature -avx512fp16 -target-feature -sm4 -target-feature +sse4.1 -target-feature +avx512ifma -target-feature +xsave -target-feature -avx512pf -target-feature +sse4.2 -target-feature -tsxldtrk -target-feature -ptwrite -target-feature -widekl -target-feature -sm3 -target-feature +invpcid -target-feature +64bit -target-feature +xsavec -target-feature +avx512vpopcntdq -target-feature +cmov -target-feature -avx512vp2intersect -target-feature +avx512cd -target-feature +movbe -target-feature -avxvnniint8 -target-feature -avx512er -target-feature -amx-int8 -target-feature -kl -target-feature -sha512 -target-feature -avxvnni -target-feature -rtm -target-feature +adx -target-feature +avx2 -target-feature -hreset -target-feature -movdiri -target-feature -serialize -target-feature +vpclmulqdq -target-feature +avx512vl -target-feature -uintr -target-feature 
+clflushopt -target-feature -raoint -target-feature -cmpccxadd -target-feature +bmi -target-feature -amx-tile -target-feature +sse -target-feature +gfni -target-feature -avxvnniint16 -target-feature -amx-fp16 -target-feature +xsaveopt -target-feature +rdrnd -target-feature -amx-bf16 -target-feature -avx512bf16 -target-feature +avx512vnni -target-feature +cx8 -target-feature +avx512bw -target-feature +sse3 -target-feature +pku -target-feature +fsgsbase -target-feature -clzero -target-feature -mwaitx -target-feature -lwp -target-feature +lzcnt -target-feature +sha -target-feature -movdir64b -target-feature +wbnoinvd -target-feature -enqcmd -target-feature -prefetchwt1 -target-feature -avxneconvert -target-feature -tbm -target-feature +pconfig -target-feature -amx-complex -target-feature +ssse3 -target-feature +cx16 -target-feature +bmi2 -target-feature +fma -target-feature +popcnt -target-feature -avxifma -target-feature +f16c -target-feature +avx512bitalg -target-feature -rdpru -target-feature +clwb -target-feature +mmx -target-feature +sse2 -target-feature +rdseed -target-feature +avx512vbmi2 -target-feature -prefetchi -target-feature +rdpid -target-feature -fma4 -target-feature +avx512vbmi -target-feature -shstk -target-feature +vaes -target-feature -waitpkg -target-feature +sgx -target-feature +fxsr -target-feature +avx512dq -target-feature -sse4a -target-feature -avx512f -debugger-tuning=gdb -fcoverage-compilation-dir=/var/jenkins/workspace/scan-build-docker-autotest/spdk/lib/nvmf -coverage-notes-file /var/jenkins/workspace/scan-build-docker-autotest/output/scan-build-tmp/2024-11-05-100141-17067-1.gcno -coverage-data-file /var/jenkins/workspace/scan-build-docker-autotest/output/scan-build-tmp/2024-11-05-100141-17067-1.gcda -resource-dir /usr/bin/../lib/clang/17 -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/build/libvfio-user/usr/local/include -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/include -D _GNU_SOURCE -I 
/var/jenkins/workspace/scan-build-docker-autotest/spdk/isa-l/.. -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/isalbuild -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/isa-l-crypto/.. -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/isalcryptobuild -D DEBUG -D SPDK_GIT_COMMIT=ae4943d20 -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/build/libvfio-user/usr/local/include -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/build/libvfio-user/usr/local/include -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/include -D _GNU_SOURCE -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/isa-l/.. -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/isalbuild -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/isa-l-crypto/.. -I /var/jenkins/workspace/scan-build-docker-autotest/spdk/isalcryptobuild -D DEBUG -D SPDK_GIT_COMMIT=ae4943d20 -internal-isystem /usr/bin/../lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/bin/../lib/gcc/x86_64-redhat-linux/13/../../../../x86_64-redhat-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O0 -Wno-unused-parameter -Wno-missing-field-initializers -Wno-pointer-sign -Wno-unused-parameter -Wno-missing-field-initializers -Wno-pointer-sign -std=gnu11 -fdebug-compilation-dir=/var/jenkins/workspace/scan-build-docker-autotest/spdk/lib/nvmf -ferror-limit 19 -fsanitize=address,alignment,array-bounds,bool,builtin,enum,float-cast-overflow,function,integer-divide-by-zero,nonnull-attribute,null,pointer-overflow,return,returns-nonnull-attribute,shift-base,shift-exponent,signed-integer-overflow,unreachable,vla-bound,vptr -fsanitize-recover=alignment,array-bounds,bool,builtin,enum,float-cast-overflow,function,integer-divide-by-zero,nonnull-attribute,null,pointer-overflow,returns-nonnull-attribute,shift-base,shift-exponent,signed-integer-overflow,vla-bound,vptr 
-fsanitize-system-ignorelist=/usr/bin/../lib/clang/17/share/asan_ignorelist.txt -fno-sanitize-memory-param-retval -fsanitize-address-use-after-scope -fsanitize-address-globals-dead-stripping -fno-assume-sane-operator-new -stack-protector 1 -fgnuc-version=4.2.1 -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /var/jenkins/workspace/scan-build-docker-autotest/output/scan-build-tmp/2024-11-05-100141-17067-1 -x c nvmf.c
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2016 Intel Corporation. All rights reserved.
3 * Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved.
4 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 */
6
7#include "spdk/stdinc.h"
8
9#include "spdk/bdev.h"
10#include "spdk/bit_array.h"
11#include "spdk/thread.h"
12#include "spdk/nvmf.h"
13#include "spdk/endian.h"
14#include "spdk/string.h"
15#include "spdk/log.h"
16#include "spdk_internal/usdt.h"
17
18#include "nvmf_internal.h"
19#include "transport.h"
20
21SPDK_LOG_REGISTER_COMPONENT(nvmf)struct spdk_log_flag SPDK_LOG_nvmf = { .name = "nvmf", .enabled
= 0, }; __attribute__((constructor)) static void register_flag_nvmf
(void) { spdk_log_register_flag("nvmf", &SPDK_LOG_nvmf); }
22
23#define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS1024 1024
24
25static TAILQ_HEAD(, spdk_nvmf_tgt)struct { struct spdk_nvmf_tgt *tqh_first; struct spdk_nvmf_tgt
* *tqh_last; }
g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts){ ((void*)0), &(g_nvmf_tgts).tqh_first };
26
27typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status);
28
/* supplied to a single call to nvmf_qpair_disconnect */
struct nvmf_qpair_disconnect_ctx {
	struct spdk_nvmf_qpair *qpair;	/* qpair being disconnected */
	struct spdk_nvmf_ctrlr *ctrlr;	/* controller the qpair belongs to */
	uint16_t qid;			/* queue id of the qpair */
};
35
/*
 * There are several times when we need to iterate through the list of all qpairs and selectively delete them.
 * In order to do this sequentially without overlap, we must provide a context to recover the next qpair from
 * to enable calling nvmf_qpair_disconnect on the next desired qpair.
 */
struct nvmf_qpair_disconnect_many_ctx {
	struct spdk_nvmf_subsystem *subsystem;	/* subsystem being modified, if any */
	struct spdk_nvmf_poll_group *group;	/* poll group whose qpairs are walked */
	spdk_nvmf_poll_group_mod_done cpl_fn;	/* completion callback */
	void *cpl_ctx;				/* argument passed to cpl_fn */
};
47
48static struct spdk_nvmf_referral *
49nvmf_tgt_find_referral(struct spdk_nvmf_tgt *tgt,
50 const struct spdk_nvme_transport_id *trid)
51{
52 struct spdk_nvmf_referral *referral;
53
54 TAILQ_FOREACH(referral, &tgt->referrals, link)for ((referral) = ((&tgt->referrals)->tqh_first); (
referral); (referral) = ((referral)->link.tqe_next))
{
55 if (spdk_nvme_transport_id_compare(&referral->trid, trid) == 0) {
56 return referral;
57 }
58 }
59
60 return NULL((void*)0);
61}
62
63int
64spdk_nvmf_tgt_add_referral(struct spdk_nvmf_tgt *tgt,
65 const struct spdk_nvmf_referral_opts *uopts)
66{
67 struct spdk_nvmf_referral *referral;
68 struct spdk_nvmf_referral_opts opts = {};
69 struct spdk_nvme_transport_id *trid = &opts.trid;
70
71 memcpy(&opts, uopts, spdk_min(uopts->size, sizeof(opts))(((uopts->size)<(sizeof(opts)))?(uopts->size):(sizeof
(opts)))
);
72 if (trid->subnqn[0] == '\0') {
73 snprintf(trid->subnqn, sizeof(trid->subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN"nqn.2014-08.org.nvmexpress.discovery");
74 }
75
76 if (!nvmf_nqn_is_valid(trid->subnqn)) {
77 SPDK_ERRLOG("Invalid subsystem NQN\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 77, __func__, "Invalid subsystem NQN\n"
)
;
78 return -EINVAL22;
79 }
80
81 /* If the entry already exists, just ignore it. */
82 if (nvmf_tgt_find_referral(tgt, trid)) {
83 return 0;
84 }
85
86 referral = calloc(1, sizeof(*referral));
87 if (!referral) {
88 SPDK_ERRLOG("Failed to allocate memory for a referral\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 88, __func__, "Failed to allocate memory for a referral\n"
)
;
89 return -ENOMEM12;
90 }
91
92 referral->entry.subtype = nvmf_nqn_is_discovery(trid->subnqn) ?
93 SPDK_NVMF_SUBTYPE_DISCOVERY :
94 SPDK_NVMF_SUBTYPE_NVME;
95 referral->entry.treq.secure_channel = opts.secure_channel ?
96 SPDK_NVMF_TREQ_SECURE_CHANNEL_REQUIRED :
97 SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED;
98 referral->entry.cntlid = 0xffff;
99 referral->entry.trtype = trid->trtype;
100 referral->entry.adrfam = trid->adrfam;
101 referral->allow_any_host = opts.allow_any_host;
102 memcpy(&referral->trid, trid, sizeof(struct spdk_nvme_transport_id));
103 spdk_strcpy_pad(referral->entry.subnqn, trid->subnqn, sizeof(trid->subnqn), '\0');
104 spdk_strcpy_pad(referral->entry.trsvcid, trid->trsvcid, sizeof(referral->entry.trsvcid), ' ');
105 spdk_strcpy_pad(referral->entry.traddr, trid->traddr, sizeof(referral->entry.traddr), ' ');
106
107 TAILQ_INSERT_HEAD(&tgt->referrals, referral, link)do { if (((referral)->link.tqe_next = (&tgt->referrals
)->tqh_first) != ((void*)0)) (&tgt->referrals)->
tqh_first->link.tqe_prev = &(referral)->link.tqe_next
; else (&tgt->referrals)->tqh_last = &(referral
)->link.tqe_next; (&tgt->referrals)->tqh_first =
(referral); (referral)->link.tqe_prev = &(&tgt->
referrals)->tqh_first; } while ( 0)
;
108 spdk_nvmf_send_discovery_log_notice(tgt, NULL((void*)0));
109
110 return 0;
111}
112
113int
114spdk_nvmf_tgt_remove_referral(struct spdk_nvmf_tgt *tgt,
115 const struct spdk_nvmf_referral_opts *uopts)
116{
117 struct spdk_nvmf_referral *referral;
118 struct spdk_nvmf_referral_opts opts = {};
119 struct spdk_nvme_transport_id *trid = &opts.trid;
120
121 memcpy(&opts, uopts, spdk_min(uopts->size, sizeof(opts))(((uopts->size)<(sizeof(opts)))?(uopts->size):(sizeof
(opts)))
);
122 if (trid->subnqn[0] == '\0') {
123 snprintf(trid->subnqn, sizeof(trid->subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN"nqn.2014-08.org.nvmexpress.discovery");
124 }
125
126 referral = nvmf_tgt_find_referral(tgt, &opts.trid);
127 if (referral == NULL((void*)0)) {
128 return -ENOENT2;
129 }
130
131 TAILQ_REMOVE(&tgt->referrals, referral, link)do { __typeof__(referral) _elm; if (((referral)->link.tqe_next
) != ((void*)0)) (referral)->link.tqe_next->link.tqe_prev
= (referral)->link.tqe_prev; else (&tgt->referrals
)->tqh_last = (referral)->link.tqe_prev; *(referral)->
link.tqe_prev = (referral)->link.tqe_next; for ((_elm) = (
(&tgt->referrals)->tqh_first); (_elm); (_elm) = ((_elm
)->link.tqe_next)) { ((void) sizeof ((_elm != referral) ? 1
: 0), __extension__ ({ if (_elm != referral) ; else __assert_fail
("_elm != referral", "nvmf.c", 131, __extension__ __PRETTY_FUNCTION__
); })); } } while (0)
;
132 spdk_nvmf_send_discovery_log_notice(tgt, NULL((void*)0));
133
134 free(referral);
135
136 return 0;
137}
138
/* Transition @qpair to @state. Must be called on the qpair's poll group
 * thread; both asserts enforce that invariant in debug builds. */
void
nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair,
		     enum spdk_nvmf_qpair_state state)
{
	assert(qpair != NULL);
	/* qpair state is only ever touched from its owning group's thread */
	assert(qpair->group->thread == spdk_get_thread());

	qpair->state = state;
}
148
149static int
150nvmf_poll_group_poll(void *ctx)
151{
152 struct spdk_nvmf_poll_group *group = ctx;
153 int rc;
154 int count = 0;
155 struct spdk_nvmf_transport_poll_group *tgroup;
156
157 TAILQ_FOREACH(tgroup, &group->tgroups, link)for ((tgroup) = ((&group->tgroups)->tqh_first); (tgroup
); (tgroup) = ((tgroup)->link.tqe_next))
{
158 rc = nvmf_transport_poll_group_poll(tgroup);
159 if (rc < 0) {
160 return SPDK_POLLER_BUSY;
161 }
162 count += rc;
163 }
164
165 return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
166}
167
168/*
169 * Reset and clean up the poll group (I/O channel code will actually free the
170 * group).
171 */
172static void
173nvmf_tgt_cleanup_poll_group(struct spdk_nvmf_poll_group *group)
174{
175 struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
176 struct spdk_nvmf_subsystem_poll_group *sgroup;
177 uint32_t sid, nsid;
178
179 TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp)for ((tgroup) = (((&group->tgroups))->tqh_first); (
tgroup) && ((tmp) = (((tgroup))->link.tqe_next), 1
); (tgroup) = (tmp))
{
180 TAILQ_REMOVE(&group->tgroups, tgroup, link)do { __typeof__(tgroup) _elm; if (((tgroup)->link.tqe_next
) != ((void*)0)) (tgroup)->link.tqe_next->link.tqe_prev
= (tgroup)->link.tqe_prev; else (&group->tgroups)->
tqh_last = (tgroup)->link.tqe_prev; *(tgroup)->link.tqe_prev
= (tgroup)->link.tqe_next; for ((_elm) = ((&group->
tgroups)->tqh_first); (_elm); (_elm) = ((_elm)->link.tqe_next
)) { ((void) sizeof ((_elm != tgroup) ? 1 : 0), __extension__
({ if (_elm != tgroup) ; else __assert_fail ("_elm != tgroup"
, "nvmf.c", 180, __extension__ __PRETTY_FUNCTION__); })); } }
while (0)
;
181 nvmf_transport_poll_group_destroy(tgroup);
182 }
183
184 for (sid = 0; sid < group->num_sgroups; sid++) {
185 sgroup = &group->sgroups[sid];
186
187 assert(sgroup != NULL)((void) sizeof ((sgroup != ((void*)0)) ? 1 : 0), __extension__
({ if (sgroup != ((void*)0)) ; else __assert_fail ("sgroup != NULL"
, "nvmf.c", 187, __extension__ __PRETTY_FUNCTION__); }))
;
188
189 for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
190 if (sgroup->ns_info[nsid].channel) {
191 spdk_put_io_channel(sgroup->ns_info[nsid].channel);
192 sgroup->ns_info[nsid].channel = NULL((void*)0);
193 }
194 }
195
196 free(sgroup->ns_info);
197 }
198
199 free(group->sgroups);
200
201 spdk_poller_unregister(&group->poller);
202
203 if (group->destroy_cb_fn) {
204 group->destroy_cb_fn(group->destroy_cb_arg, 0);
205 }
206}
207
/*
 * Callback to unregister a poll group from the target, and clean up its state.
 */
static void
nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_poll_group *group = ctx_buf;

	SPDK_DTRACE_PROBE1_TICKS(nvmf_destroy_poll_group, spdk_thread_get_id(group->thread));

	/* Unlink the group from the target under the target mutex; from here
	 * on the group is owned exclusively by this thread. */
	pthread_mutex_lock(&tgt->mutex);
	TAILQ_REMOVE(&tgt->poll_groups, group, link);
	tgt->num_poll_groups--;
	pthread_mutex_unlock(&tgt->mutex);

	/* Poll groups must not disappear mid pause/resume. */
	assert(!(tgt->state == NVMF_TGT_PAUSING || tgt->state == NVMF_TGT_RESUMING));
	nvmf_tgt_cleanup_poll_group(group);
}
227
228static int
229nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
230 struct spdk_nvmf_transport *transport)
231{
232 struct spdk_nvmf_transport_poll_group *tgroup = nvmf_get_transport_poll_group(group, transport);
233
234 if (tgroup != NULL((void*)0)) {
235 /* Transport already in the poll group */
236 return 0;
237 }
238
239 tgroup = nvmf_transport_poll_group_create(transport, group);
240 if (!tgroup) {
241 SPDK_ERRLOG("Unable to create poll group for transport\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 241, __func__, "Unable to create poll group for transport\n"
)
;
242 return -1;
243 }
244 SPDK_DTRACE_PROBE2_TICKS(nvmf_transport_poll_group_create, transport,
245 spdk_thread_get_id(group->thread));
246
247 tgroup->group = group;
248 TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link)do { (tgroup)->link.tqe_next = ((void*)0); (tgroup)->link
.tqe_prev = (&group->tgroups)->tqh_last; *(&group
->tgroups)->tqh_last = (tgroup); (&group->tgroups
)->tqh_last = &(tgroup)->link.tqe_next; } while ( 0
)
;
249
250 return 0;
251}
252
/* I/O channel create callback: initialize a poll group for @io_device (the
 * target) on the calling thread, wiring in every registered transport and
 * every existing subsystem. Any failure tears the partially-built group back
 * down via nvmf_tgt_cleanup_poll_group before returning. */
static int
nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_poll_group *group = ctx_buf;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_thread *thread = spdk_get_thread();
	uint32_t i;
	int rc;

	group->tgt = tgt;
	TAILQ_INIT(&group->tgroups);
	TAILQ_INIT(&group->qpairs);
	group->thread = thread;
	pthread_mutex_init(&group->mutex, NULL);

	group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0);
	spdk_poller_register_interrupt(group->poller, NULL, NULL);

	SPDK_DTRACE_PROBE1_TICKS(nvmf_create_poll_group, spdk_thread_get_id(thread));

	/* One transport poll group per registered transport. */
	TAILQ_FOREACH(transport, &tgt->transports, link) {
		rc = nvmf_poll_group_add_transport(group, transport);
		if (rc != 0) {
			nvmf_tgt_cleanup_poll_group(group);
			return rc;
		}
	}

	/* Subsystem poll group slots, one per possible subsystem id. */
	group->num_sgroups = tgt->max_subsystems;
	group->sgroups = calloc(tgt->max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group));
	if (!group->sgroups) {
		nvmf_tgt_cleanup_poll_group(group);
		return -ENOMEM;
	}

	for (i = 0; i < tgt->max_subsystems; i++) {
		TAILQ_INIT(&group->sgroups[i].queued);
	}

	/* Register every subsystem that already exists on the target. */
	for (subsystem = spdk_nvmf_subsystem_get_first(tgt);
	     subsystem != NULL;
	     subsystem = spdk_nvmf_subsystem_get_next(subsystem)) {
		if (nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) {
			nvmf_tgt_cleanup_poll_group(group);
			return -1;
		}
	}

	/* Publish the fully-initialized group on the target's list. */
	pthread_mutex_lock(&tgt->mutex);
	tgt->num_poll_groups++;
	TAILQ_INSERT_TAIL(&tgt->poll_groups, group, link);
	pthread_mutex_unlock(&tgt->mutex);

	return 0;
}
310
/* Disconnect every qpair on the context's poll group. If any disconnects are
 * still in flight after the walk, re-post this function to the same thread
 * and retry; once the qpair list drains, release the group's I/O channel
 * (which eventually triggers nvmf_tgt_destroy_poll_group) and free ctx. */
static void
_nvmf_tgt_disconnect_qpairs(void *ctx)
{
	struct spdk_nvmf_qpair *qpair, *qpair_tmp;
	struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_poll_group *group = qpair_ctx->group;
	struct spdk_io_channel *ch;
	int rc;

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qpair_tmp) {
		rc = spdk_nvmf_qpair_disconnect(qpair);
		/* -EINPROGRESS is expected for an async disconnect; any other
		 * error stops this pass (the retry below will come back). */
		if (rc && rc != -EINPROGRESS) {
			break;
		}
	}

	if (TAILQ_EMPTY(&group->qpairs)) {
		/* When the refcount from the channels reaches 0, nvmf_tgt_destroy_poll_group will be called. */
		ch = spdk_io_channel_from_ctx(group);
		spdk_put_io_channel(ch);
		free(qpair_ctx);
		return;
	}

	/* Some qpairs are in process of being disconnected. Send a message and try to remove them again */
	spdk_thread_send_msg(spdk_get_thread(), _nvmf_tgt_disconnect_qpairs, ctx);
}
338
339static void
340nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group)
341{
342 struct nvmf_qpair_disconnect_many_ctx *ctx;
343
344 SPDK_DTRACE_PROBE1_TICKS(nvmf_destroy_poll_group_qpairs, spdk_thread_get_id(group->thread));
345
346 ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));
347 if (!ctx) {
348 SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 348, __func__, "Failed to allocate memory for destroy poll group ctx\n"
)
;
349 return;
350 }
351
352 ctx->group = group;
353 _nvmf_tgt_disconnect_qpairs(ctx);
354}
355
356struct spdk_nvmf_tgt *
357spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *_opts)
358{
359 struct spdk_nvmf_tgt *tgt, *tmp_tgt;
360 struct spdk_nvmf_target_opts opts = {
361 .max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS1024,
362 .discovery_filter = SPDK_NVMF_TGT_DISCOVERY_MATCH_ANY,
363 };
364
365 memcpy(&opts, _opts, _opts->size);
366 if (strnlen(opts.name, NVMF_TGT_NAME_MAX_LENGTH256) == NVMF_TGT_NAME_MAX_LENGTH256) {
367 SPDK_ERRLOG("Provided target name exceeds the max length of %u.\n", NVMF_TGT_NAME_MAX_LENGTH)spdk_log(SPDK_LOG_ERROR, "nvmf.c", 367, __func__, "Provided target name exceeds the max length of %u.\n"
, 256)
;
368 return NULL((void*)0);
369 }
370
371 TAILQ_FOREACH(tmp_tgt, &g_nvmf_tgts, link)for ((tmp_tgt) = ((&g_nvmf_tgts)->tqh_first); (tmp_tgt
); (tmp_tgt) = ((tmp_tgt)->link.tqe_next))
{
372 if (!strncmp(opts.name, tmp_tgt->name, NVMF_TGT_NAME_MAX_LENGTH256)) {
373 SPDK_ERRLOG("Provided target name must be unique.\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 373, __func__, "Provided target name must be unique.\n"
)
;
374 return NULL((void*)0);
375 }
376 }
377
378 tgt = calloc(1, sizeof(*tgt));
379 if (!tgt) {
380 return NULL((void*)0);
381 }
382
383 snprintf(tgt->name, NVMF_TGT_NAME_MAX_LENGTH256, "%s", opts.name);
384
385 if (!opts.max_subsystems) {
386 tgt->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS1024;
387 } else {
388 tgt->max_subsystems = opts.max_subsystems;
389 }
390
391 tgt->crdt[0] = opts.crdt[0];
392 tgt->crdt[1] = opts.crdt[1];
393 tgt->crdt[2] = opts.crdt[2];
394 tgt->discovery_filter = opts.discovery_filter;
395 tgt->discovery_genctr = 0;
396 tgt->dhchap_digests = opts.dhchap_digests;
397 tgt->dhchap_dhgroups = opts.dhchap_dhgroups;
398 TAILQ_INIT(&tgt->transports)do { (&tgt->transports)->tqh_first = ((void*)0); (&
tgt->transports)->tqh_last = &(&tgt->transports
)->tqh_first; } while ( 0)
;
399 TAILQ_INIT(&tgt->poll_groups)do { (&tgt->poll_groups)->tqh_first = ((void*)0); (
&tgt->poll_groups)->tqh_last = &(&tgt->poll_groups
)->tqh_first; } while ( 0)
;
400 TAILQ_INIT(&tgt->referrals)do { (&tgt->referrals)->tqh_first = ((void*)0); (&
tgt->referrals)->tqh_last = &(&tgt->referrals
)->tqh_first; } while ( 0)
;
401 tgt->num_poll_groups = 0;
402
403 tgt->subsystem_ids = spdk_bit_array_create(tgt->max_subsystems);
404 if (tgt->subsystem_ids == NULL((void*)0)) {
405 free(tgt);
406 return NULL((void*)0);
407 }
408
409 RB_INIT(&tgt->subsystems)do { (&tgt->subsystems)->rbh_root = ((void*)0); } while
( 0)
;
410
411 pthread_mutex_init(&tgt->mutex, NULL((void*)0));
412
413 spdk_io_device_register(tgt,
414 nvmf_tgt_create_poll_group,
415 nvmf_tgt_destroy_poll_group,
416 sizeof(struct spdk_nvmf_poll_group),
417 tgt->name);
418
419 tgt->state = NVMF_TGT_RUNNING;
420
421 TAILQ_INSERT_HEAD(&g_nvmf_tgts, tgt, link)do { if (((tgt)->link.tqe_next = (&g_nvmf_tgts)->tqh_first
) != ((void*)0)) (&g_nvmf_tgts)->tqh_first->link.tqe_prev
= &(tgt)->link.tqe_next; else (&g_nvmf_tgts)->
tqh_last = &(tgt)->link.tqe_next; (&g_nvmf_tgts)->
tqh_first = (tgt); (tgt)->link.tqe_prev = &(&g_nvmf_tgts
)->tqh_first; } while ( 0)
;
422
423 return tgt;
424}
425
426static void
427_nvmf_tgt_destroy_next_transport(void *ctx)
428{
429 struct spdk_nvmf_tgt *tgt = ctx;
430 struct spdk_nvmf_transport *transport;
431
432 if (!TAILQ_EMPTY(&tgt->transports)((&tgt->transports)->tqh_first == ((void*)0))) {
433 transport = TAILQ_FIRST(&tgt->transports)((&tgt->transports)->tqh_first);
434 TAILQ_REMOVE(&tgt->transports, transport, link)do { __typeof__(transport) _elm; if (((transport)->link.tqe_next
) != ((void*)0)) (transport)->link.tqe_next->link.tqe_prev
= (transport)->link.tqe_prev; else (&tgt->transports
)->tqh_last = (transport)->link.tqe_prev; *(transport)->
link.tqe_prev = (transport)->link.tqe_next; for ((_elm) = (
(&tgt->transports)->tqh_first); (_elm); (_elm) = ((
_elm)->link.tqe_next)) { ((void) sizeof ((_elm != transport
) ? 1 : 0), __extension__ ({ if (_elm != transport) ; else __assert_fail
("_elm != transport", "nvmf.c", 434, __extension__ __PRETTY_FUNCTION__
); })); } } while (0)
;
435 spdk_nvmf_transport_destroy(transport, _nvmf_tgt_destroy_next_transport, tgt);
436 } else {
437 spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn = tgt->destroy_cb_fn;
438 void *destroy_cb_arg = tgt->destroy_cb_arg;
439
440 pthread_mutex_destroy(&tgt->mutex);
441 free(tgt);
442
443 if (destroy_cb_fn) {
444 destroy_cb_fn(destroy_cb_arg, 0);
445 }
446 }
447}
448
/* io_device unregister callback: free all referrals, stop mDNS pull
 * registration, destroy every subsystem (re-entering here as the completion
 * callback when a destroy finishes asynchronously), then begin tearing down
 * the transports. */
static void
nvmf_tgt_destroy_cb(void *io_device)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_subsystem *subsystem, *subsystem_next;
	int rc;
	struct spdk_nvmf_referral *referral;

	/* Drain and free the referral list. */
	while ((referral = TAILQ_FIRST(&tgt->referrals))) {
		TAILQ_REMOVE(&tgt->referrals, referral, link);
		free(referral);
	}

	nvmf_tgt_stop_mdns_prr(tgt);

	/* We will be freeing subsystems in this loop, so we always need to get the next one
	 * ahead of time, since we can't call get_next() on a subsystem that's been freed.
	 */
	for (subsystem = spdk_nvmf_subsystem_get_first(tgt),
	     subsystem_next = spdk_nvmf_subsystem_get_next(subsystem);
	     subsystem != NULL;
	     subsystem = subsystem_next,
	     subsystem_next = spdk_nvmf_subsystem_get_next(subsystem_next)) {
		nvmf_subsystem_remove_all_listeners(subsystem, true);

		rc = spdk_nvmf_subsystem_destroy(subsystem, nvmf_tgt_destroy_cb, tgt);
		if (rc) {
			if (rc == -EINPROGRESS) {
				/* If rc is -EINPROGRESS, nvmf_tgt_destroy_cb will be called again when subsystem #i
				 * is destroyed, nvmf_tgt_destroy_cb will continue to destroy other subsystems if any */
				return;
			} else {
				SPDK_ERRLOG("Failed to destroy subsystem %s, rc %d\n", subsystem->subnqn, rc);
			}
		}
	}
	spdk_bit_array_free(&tgt->subsystem_ids);
	_nvmf_tgt_destroy_next_transport(tgt);
}
488
/* Begin asynchronous destruction of @tgt. @cb_fn(@cb_arg, 0) is invoked at
 * the end of the teardown chain once everything has been released. */
void
spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt,
		      spdk_nvmf_tgt_destroy_done_fn cb_fn,
		      void *cb_arg)
{
	/* Destruction must not race with a pause/resume in progress. */
	assert(!(tgt->state == NVMF_TGT_PAUSING || tgt->state == NVMF_TGT_RESUMING));

	tgt->destroy_cb_fn = cb_fn;
	tgt->destroy_cb_arg = cb_arg;

	TAILQ_REMOVE(&g_nvmf_tgts, tgt, link);

	/* nvmf_tgt_destroy_cb runs once all poll group channels are released. */
	spdk_io_device_unregister(tgt, nvmf_tgt_destroy_cb);
}
503
/* Return the name the target was created with. */
const char *
spdk_nvmf_tgt_get_name(struct spdk_nvmf_tgt *tgt)
{
	return tgt->name;
}
509
510struct spdk_nvmf_tgt *
511spdk_nvmf_get_tgt(const char *name)
512{
513 struct spdk_nvmf_tgt *tgt;
514 uint32_t num_targets = 0;
515
516 TAILQ_FOREACH(tgt, &g_nvmf_tgts, link)for ((tgt) = ((&g_nvmf_tgts)->tqh_first); (tgt); (tgt)
= ((tgt)->link.tqe_next))
{
517 if (name) {
518 if (!strncmp(tgt->name, name, NVMF_TGT_NAME_MAX_LENGTH256)) {
519 return tgt;
520 }
521 }
522 num_targets++;
523 }
524
525 /*
526 * special case. If there is only one target and
527 * no name was specified, return the only available
528 * target. If there is more than one target, name must
529 * be specified.
530 */
531 if (!name && num_targets == 1) {
532 return TAILQ_FIRST(&g_nvmf_tgts)((&g_nvmf_tgts)->tqh_first);
533 }
534
535 return NULL((void*)0);
536}
537
/* Return the first target on the global list, or NULL if none exist. */
struct spdk_nvmf_tgt *
spdk_nvmf_get_first_tgt(void)
{
	return TAILQ_FIRST(&g_nvmf_tgts);
}
543
/* Return the target following @prev on the global list, or NULL at the end.
 * @prev must be a valid, currently-listed target. */
struct spdk_nvmf_tgt *
spdk_nvmf_get_next_tgt(struct spdk_nvmf_tgt *prev)
{
	return TAILQ_NEXT(prev, link);
}
549
/*
 * Emit the JSON-RPC invocations that recreate one NVMe-type subsystem:
 * "nvmf_create_subsystem", then "nvmf_subsystem_add_host" per host,
 * "nvmf_subsystem_add_ns" per namespace, and "nvmf_ns_add_host" per
 * host a namespace was made individually visible to.
 */
static void
nvmf_write_nvme_subsystem_config(struct spdk_json_write_ctx *w,
				 struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_host *host;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_ns_opts ns_opts;
	uint32_t max_namespaces;
	struct spdk_nvmf_transport *transport;

	assert(spdk_nvmf_subsystem_get_type(subsystem) == SPDK_NVMF_SUBTYPE_NVME);

	/* { */
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_create_subsystem");

	/* "params" : { */
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
	spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem));
	spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem));
	spdk_json_write_named_string(w, "model_number", spdk_nvmf_subsystem_get_mn(subsystem));

	/* Only emit an explicit namespace cap; 0 is the "unset" default. */
	max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem);
	if (max_namespaces != 0) {
		spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces);
	}

	spdk_json_write_named_uint32(w, "min_cntlid", spdk_nvmf_subsystem_get_min_cntlid(subsystem));
	spdk_json_write_named_uint32(w, "max_cntlid", spdk_nvmf_subsystem_get_max_cntlid(subsystem));
	spdk_json_write_named_bool(w, "ana_reporting", spdk_nvmf_subsystem_get_ana_reporting(subsystem));

	/* } "params" */
	spdk_json_write_object_end(w);

	/* } */
	spdk_json_write_object_end(w);

	for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL;
	     host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) {

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host");

		/* "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
		spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host));
		if (host->dhchap_key != NULL) {
			/* Keys are persisted by name only, never by value. */
			spdk_json_write_named_string(w, "dhchap_key",
						     spdk_key_get_name(host->dhchap_key));
		}
		if (host->dhchap_ctrlr_key != NULL) {
			spdk_json_write_named_string(w, "dhchap_ctrlr_key",
						     spdk_key_get_name(host->dhchap_ctrlr_key));
		}
		/* Let each transport append its transport-specific host params. */
		TAILQ_FOREACH(transport, &subsystem->tgt->transports, link) {
			if (transport->ops->subsystem_dump_host != NULL) {
				transport->ops->subsystem_dump_host(transport, subsystem, host->nqn, w);
			}
		}

		/* } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts));

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns");

		/* "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));

		/* "namespace" : { */
		spdk_json_write_named_object_begin(w, "namespace");

		spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns));
		spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns)));

		if (ns->ptpl_file != NULL) {
			spdk_json_write_named_string(w, "ptpl_file", ns->ptpl_file);
		}

		/* Identifiers are only emitted when explicitly set (non-zero). */
		if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) {
			SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch");
			spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]),
							 from_be64(&ns_opts.nguid[8]));
		}

		if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) {
			SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch");
			spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64));
		}

		if (!spdk_uuid_is_null(&ns_opts.uuid)) {
			spdk_json_write_named_uuid(w, "uuid", &ns_opts.uuid);
		}

		if (spdk_nvmf_subsystem_get_ana_reporting(subsystem)) {
			spdk_json_write_named_uint32(w, "anagrpid", ns_opts.anagrpid);
		}

		spdk_json_write_named_bool(w, "no_auto_visible", !ns->always_visible);

		/* "namespace" */
		spdk_json_write_object_end(w);

		/* } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);

		/* Hosts this namespace was made individually visible to. */
		TAILQ_FOREACH(host, &ns->hosts, link) {
			spdk_json_write_object_begin(w);
			spdk_json_write_named_string(w, "method", "nvmf_ns_add_host");
			spdk_json_write_named_object_begin(w, "params");
			spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));
			spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns));
			spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host));
			spdk_json_write_object_end(w);
			spdk_json_write_object_end(w);
		}
	}
}
684
/*
 * Write the JSON-RPC calls that recreate one subsystem and all of its
 * listeners.  NVMe-type subsystems get their full host/namespace
 * configuration emitted first.
 */
static void
nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w,
				 struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_subsystem_listener *listener;
	struct spdk_nvmf_transport *transport;
	const struct spdk_nvme_transport_id *trid;

	if (spdk_nvmf_subsystem_get_type(subsystem) == SPDK_NVMF_SUBTYPE_NVME) {
		nvmf_write_nvme_subsystem_config(w, subsystem);
	}

	for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL;
	     listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
		transport = listener->transport;
		trid = spdk_nvmf_subsystem_listener_get_trid(listener);

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener");

		/* "params" : { */
		spdk_json_write_named_object_begin(w, "params");

		spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem));

		spdk_json_write_named_object_begin(w, "listen_address");
		nvmf_transport_listen_dump_trid(trid, w);
		spdk_json_write_object_end(w);
		if (transport->ops->listen_dump_opts) {
			/* Transport-specific listener options, if any. */
			transport->ops->listen_dump_opts(transport, trid, w);
		}

		spdk_json_write_named_bool(w, "secure_channel", listener->opts.secure_channel);

		if (listener->opts.sock_impl) {
			spdk_json_write_named_string(w, "sock_impl", listener->opts.sock_impl);
		}

		/* } "params" */
		spdk_json_write_object_end(w);

		/* } */
		spdk_json_write_object_end(w);
	}
}
730
731void
732spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt)
733{
734 struct spdk_nvmf_subsystem *subsystem;
735 struct spdk_nvmf_transport *transport;
736 struct spdk_nvmf_referral *referral;
737 struct spdk_nvmf_host *host;
738
739 spdk_json_write_object_begin(w);
740 spdk_json_write_named_string(w, "method", "nvmf_set_max_subsystems");
741
742 spdk_json_write_named_object_begin(w, "params");
743 spdk_json_write_named_uint32(w, "max_subsystems", tgt->max_subsystems);
744 spdk_json_write_object_end(w);
745
746 spdk_json_write_object_end(w);
747
748 spdk_json_write_object_begin(w);
749 spdk_json_write_named_string(w, "method", "nvmf_set_crdt");
750 spdk_json_write_named_object_begin(w, "params");
751 spdk_json_write_named_uint32(w, "crdt1", tgt->crdt[0]);
752 spdk_json_write_named_uint32(w, "crdt2", tgt->crdt[1]);
753 spdk_json_write_named_uint32(w, "crdt3", tgt->crdt[2]);
754 spdk_json_write_object_end(w);
755 spdk_json_write_object_end(w);
756
757 /* write transports */
758 TAILQ_FOREACH(transport, &tgt->transports, link)for ((transport) = ((&tgt->transports)->tqh_first);
(transport); (transport) = ((transport)->link.tqe_next))
{
1
Loop condition is false. Execution continues on line 765
759 spdk_json_write_object_begin(w);
760 spdk_json_write_named_string(w, "method", "nvmf_create_transport");
761 nvmf_transport_dump_opts(transport, w, true1);
762 spdk_json_write_object_end(w);
763 }
764
765 TAILQ_FOREACH(referral, &tgt->referrals, link)for ((referral) = ((&tgt->referrals)->tqh_first); (
referral); (referral) = ((referral)->link.tqe_next))
{
2
Value assigned to 'referral'
3
Assuming pointer value is null
4
Loop condition is false. Execution continues on line 782
766 spdk_json_write_object_begin(w);
767 spdk_json_write_named_string(w, "method", "nvmf_discovery_add_referral");
768
769 spdk_json_write_named_object_begin(w, "params");
770 spdk_json_write_named_object_begin(w, "address");
771 nvmf_transport_listen_dump_trid(&referral->trid, w);
772 spdk_json_write_object_end(w);
773 spdk_json_write_named_bool(w, "secure_channel",
774 referral->entry.treq.secure_channel ==
775 SPDK_NVMF_TREQ_SECURE_CHANNEL_REQUIRED);
776 spdk_json_write_named_string(w, "subnqn", referral->trid.subnqn);
777 spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_referral_get_allow_any_host(referral));
778 spdk_json_write_object_end(w);
779
780 spdk_json_write_object_end(w);
781 }
782 for (host = spdk_nvmf_referral_get_first_host(referral); host != NULL((void*)0);
5
Assuming 'host' is not equal to NULL
6
Loop condition is true. Entering loop body
783 host = spdk_nvmf_referral_get_next_host(referral, host)) {
784
785 spdk_json_write_object_begin(w);
786 spdk_json_write_named_string(w, "method", "nvmf_discovery_referral_add_host");
787
788 /* "params" : { */
789 spdk_json_write_named_object_begin(w, "params");
790
791 spdk_json_write_named_string(w, "nqn", spdk_nvmf_referral_get_nqn(referral));
792 spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host));
793 if (host->dhchap_key != NULL((void*)0)) {
7
Assuming field 'dhchap_key' is equal to NULL
8
Taking false branch
794 spdk_json_write_named_string(w, "dhchap_key",
795 spdk_key_get_name(host->dhchap_key));
796 }
797 if (host->dhchap_ctrlr_key != NULL((void*)0)) {
9
Assuming field 'dhchap_ctrlr_key' is equal to NULL
10
Taking false branch
798 spdk_json_write_named_string(w, "dhchap_ctrlr_key",
799 spdk_key_get_name(host->dhchap_ctrlr_key));
800 }
801 TAILQ_FOREACH(transport, &referral->tgt->transports, link)for ((transport) = ((&referral->tgt->transports)->
tqh_first); (transport); (transport) = ((transport)->link.
tqe_next))
{
11
Access to field 'tgt' results in a dereference of a null pointer (loaded from variable 'referral')
802 if (transport->ops->referral_dump_host != NULL((void*)0)) {
803 transport->ops->referral_dump_host(transport, referral, host->nqn, w);
804 }
805 }
806
807 /* } "params" */
808 spdk_json_write_object_end(w);
809
810 /* } */
811 spdk_json_write_object_end(w);
812 }
813
814 subsystem = spdk_nvmf_subsystem_get_first(tgt);
815 while (subsystem) {
816 nvmf_write_subsystem_config_json(w, subsystem);
817 subsystem = spdk_nvmf_subsystem_get_next(subsystem);
818 }
819}
820
/*
 * Field-by-field copy of listen opts, bounded by the caller-provided
 * opts_size so that callers built against an older (smaller) struct
 * remain ABI-safe: only fields that fit entirely inside opts_size
 * are copied.
 */
static void
nvmf_listen_opts_copy(struct spdk_nvmf_listen_opts *opts,
		      const struct spdk_nvmf_listen_opts *opts_src, size_t opts_size)
{
	assert(opts);
	assert(opts_src);

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvmf_listen_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = opts_src->field; \
	} \

	SET_FIELD(transport_specific);
	SET_FIELD(secure_channel);
	SET_FIELD(ana_state);
	SET_FIELD(sock_impl);
#undef SET_FIELD

	/* Do not remove this statement, you should always update this statement when you adding a new field,
	 * and do not forget to add the SET_FIELD statement for your added field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listen_opts) == 32, "Incorrect size");
}
845
/* Initialize caller-provided listen opts with library defaults, honoring
 * the caller's struct size for ABI compatibility. */
void
spdk_nvmf_listen_opts_init(struct spdk_nvmf_listen_opts *opts, size_t opts_size)
{
	struct spdk_nvmf_listen_opts opts_local = {};

	/* local version of opts should have defaults set here */
	opts_local.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvmf_listen_opts_copy(opts, &opts_local, opts_size);
}
855
/*
 * Begin accepting connections on the given transport ID.  The matching
 * transport must already have been created and added to the target.
 * Returns 0 on success or a negative errno (-EINVAL for bad opts or an
 * unknown transport).
 */
int
spdk_nvmf_tgt_listen_ext(struct spdk_nvmf_tgt *tgt, const struct spdk_nvme_transport_id *trid,
			 struct spdk_nvmf_listen_opts *opts)
{
	struct spdk_nvmf_transport *transport;
	int rc;
	struct spdk_nvmf_listen_opts opts_local = {};

	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return -EINVAL;
	}

	if (!opts->opts_size) {
		SPDK_ERRLOG("The opts_size in opts structure should not be zero\n");
		return -EINVAL;
	}

	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring);
	if (!transport) {
		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n",
			    trid->trstring);
		return -EINVAL;
	}

	/* Copy only the fields the caller's (possibly older) struct contains. */
	nvmf_listen_opts_copy(&opts_local, opts, opts->opts_size);
	rc = spdk_nvmf_transport_listen(transport, trid, &opts_local);
	if (rc < 0) {
		SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr);
	}

	return rc;
}
889
/*
 * Stop accepting new connections on the given transport ID.  Existing
 * connections are not affected.  Returns 0 on success or a negative errno.
 */
int
spdk_nvmf_tgt_stop_listen(struct spdk_nvmf_tgt *tgt,
			  struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_transport *transport;
	int rc;

	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trstring);
	if (!transport) {
		SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n",
			    trid->trstring);
		return -EINVAL;
	}

	rc = spdk_nvmf_transport_stop_listen(transport, trid);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to stop listening on address '%s'\n", trid->traddr);
		return rc;
	}
	return 0;
}
911
/* Context carried through the spdk_for_each_channel() walks used when
 * adding a transport to every poll group (and rolling back on failure). */
struct spdk_nvmf_tgt_add_transport_ctx {
	struct spdk_nvmf_tgt *tgt;
	struct spdk_nvmf_transport *transport;
	spdk_nvmf_tgt_add_transport_done_fn cb_fn;
	void *cb_arg;
	int status;	/* failure status stashed for the rollback completion */
};
919
/* Final step of the rollback walk: report the originally stashed failure
 * status (not this walk's status) and free the context. */
static void
_nvmf_tgt_remove_transport_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cb_fn(ctx->cb_arg, ctx->status);
	free(ctx);
}
928
/* Per-poll-group rollback step: destroy this group's transport poll group
 * for the transport whose add failed elsewhere. */
static void
_nvmf_tgt_remove_transport(struct spdk_io_channel_iter *i)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;

	/* _SAFE variant because the matching tgroup is removed mid-iteration. */
	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
		if (tgroup->transport == ctx->transport) {
			TAILQ_REMOVE(&group->tgroups, tgroup, link);
			nvmf_transport_poll_group_destroy(tgroup);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}
946
/* Completion of the per-poll-group add walk.  On success, attach the
 * transport to the target's list; on failure, start a rollback walk that
 * destroys any poll groups that were created. */
static void
_nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	if (status) {
		/* Remember the failure; the rollback completion reports it. */
		ctx->status = status;
		spdk_for_each_channel(ctx->tgt,
				      _nvmf_tgt_remove_transport,
				      ctx,
				      _nvmf_tgt_remove_transport_done);
		return;
	}

	ctx->transport->tgt = ctx->tgt;
	TAILQ_INSERT_TAIL(&ctx->tgt->transports, ctx->transport, link);
	ctx->cb_fn(ctx->cb_arg, status);
	free(ctx);
}
966
/* Per-poll-group step: create this group's poll group for the new
 * transport; a non-zero rc aborts the channel walk. */
static void
_nvmf_tgt_add_transport(struct spdk_io_channel_iter *i)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	int rc;

	rc = nvmf_poll_group_add_transport(group, ctx->transport);
	spdk_for_each_channel_continue(i, rc);
}
978
/*
 * Asynchronously add a transport to a target, creating a transport poll
 * group on every poll group.  cb_fn receives 0 on success, -EEXIST if a
 * transport with the same name already exists, or -ENOMEM.
 */
void
spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt,
			    struct spdk_nvmf_transport *transport,
			    spdk_nvmf_tgt_add_transport_done_fn cb_fn,
			    void *cb_arg)
{
	struct spdk_nvmf_tgt_add_transport_ctx *ctx;

	SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_add_transport, transport, tgt->name);

	if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->name)) {
		cb_fn(cb_arg, -EEXIST);
		return; /* transport already created */
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->tgt = tgt;
	ctx->transport = transport;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(tgt,
			      _nvmf_tgt_add_transport,
			      ctx,
			      _nvmf_tgt_add_transport_done);
}
1010
/* Context for the pause/resume polling walks across all poll groups. */
struct nvmf_tgt_pause_ctx {
	struct spdk_nvmf_tgt *tgt;
	spdk_nvmf_tgt_pause_polling_cb_fn cb_fn;
	void *cb_arg;
};
1016
/* Completion of the pause walk: mark the target PAUSED and notify. */
static void
_nvmf_tgt_pause_polling_done(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_tgt_pause_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->tgt->state = NVMF_TGT_PAUSED;

	ctx->cb_fn(ctx->cb_arg, status);
	free(ctx);
}
1027
/* Per-poll-group step: stop this group's poller. */
static void
_nvmf_tgt_pause_polling(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);

	spdk_poller_unregister(&group->poller);

	spdk_for_each_channel_continue(i, 0);
}
1038
/*
 * Asynchronously stop the poller on every poll group of the target.
 * Returns -EBUSY while a pause/resume is already in flight, -EINVAL
 * unless the target is RUNNING, -ENOMEM on allocation failure, else 0
 * and cb_fn fires when all pollers are stopped.
 */
int
spdk_nvmf_tgt_pause_polling(struct spdk_nvmf_tgt *tgt, spdk_nvmf_tgt_pause_polling_cb_fn cb_fn,
			    void *cb_arg)
{
	struct nvmf_tgt_pause_ctx *ctx;

	SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_pause_polling, tgt, tgt->name);

	switch (tgt->state) {
	case NVMF_TGT_PAUSING:
	case NVMF_TGT_RESUMING:
		return -EBUSY;
	case NVMF_TGT_RUNNING:
		break;
	default:
		return -EINVAL;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		return -ENOMEM;
	}


	tgt->state = NVMF_TGT_PAUSING;

	ctx->tgt = tgt;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(tgt,
			      _nvmf_tgt_pause_polling,
			      ctx,
			      _nvmf_tgt_pause_polling_done);
	return 0;
}
1075
/* Completion of the resume walk: mark the target RUNNING and notify. */
static void
_nvmf_tgt_resume_polling_done(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_tgt_pause_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->tgt->state = NVMF_TGT_RUNNING;

	ctx->cb_fn(ctx->cb_arg, status);
	free(ctx);
}
1086
/* Per-poll-group step: re-register this group's poller (the pause walk
 * must have unregistered it, hence the assert). */
static void
_nvmf_tgt_resume_polling(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);

	assert(group->poller == NULL);
	group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0);

	spdk_for_each_channel_continue(i, 0);
}
1098
/*
 * Asynchronously restart the poller on every poll group of the target.
 * Returns -EBUSY while a pause/resume is in flight, -EINVAL unless the
 * target is PAUSED, -ENOMEM on allocation failure, else 0 and cb_fn
 * fires when all pollers are running again.
 */
int
spdk_nvmf_tgt_resume_polling(struct spdk_nvmf_tgt *tgt, spdk_nvmf_tgt_resume_polling_cb_fn cb_fn,
			     void *cb_arg)
{
	struct nvmf_tgt_pause_ctx *ctx;

	SPDK_DTRACE_PROBE2_TICKS(nvmf_tgt_resume_polling, tgt, tgt->name);

	switch (tgt->state) {
	case NVMF_TGT_PAUSING:
	case NVMF_TGT_RESUMING:
		return -EBUSY;
	case NVMF_TGT_PAUSED:
		break;
	default:
		return -EINVAL;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		return -ENOMEM;
	}

	tgt->state = NVMF_TGT_RESUMING;

	ctx->tgt = tgt;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(tgt,
			      _nvmf_tgt_resume_polling,
			      ctx,
			      _nvmf_tgt_resume_polling_done);
	return 0;
}
1134
/*
 * Look up a subsystem by NQN using the target's red-black tree.
 * Returns NULL if subnqn is NULL, not NUL-terminated within the maximum
 * NQN length, or not found.
 */
struct spdk_nvmf_subsystem *
spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
{
	struct spdk_nvmf_subsystem subsystem;

	if (!subnqn) {
		return NULL;
	}

	/* Ensure that subnqn is null terminated */
	if (!memchr(subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) {
		SPDK_ERRLOG("Connect SUBNQN is not null terminated\n");
		return NULL;
	}

	/* Search with a stack-local key object holding only the subnqn. */
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
	return RB_FIND(subsystem_tree, &tgt->subsystems, &subsystem);
}
1153
1154struct spdk_nvmf_referral *
1155spdk_nvmf_tgt_find_referral(struct spdk_nvmf_tgt *tgt, const char *subnqn)
1156{
1157 struct spdk_nvmf_referral *referral;
1158
1159 TAILQ_FOREACH(referral, &tgt->referrals, link)for ((referral) = ((&tgt->referrals)->tqh_first); (
referral); (referral) = ((referral)->link.tqe_next))
{
1160 if (!strncmp(referral->trid.subnqn, subnqn, SPDK_NVMF_NQN_MAX_LEN223)) {
1161 return referral;
1162 }
1163 }
1164
1165 return 0;
1166
1167}
1168
1169struct spdk_nvmf_transport *
1170spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
1171{
1172 struct spdk_nvmf_transport *transport;
1173
1174 TAILQ_FOREACH(transport, &tgt->transports, link)for ((transport) = ((&tgt->transports)->tqh_first);
(transport); (transport) = ((transport)->link.tqe_next))
{
1175 if (!strncasecmp(transport->ops->name, transport_name, SPDK_NVMF_TRSTRING_MAX_LEN32)) {
1176 return transport;
1177 }
1178 }
1179 return NULL((void*)0);
1180}
1181
/* Message payload used to hand a new qpair to its poll group's thread. */
struct nvmf_new_qpair_ctx {
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_poll_group *group;
};
1186
/* Runs on the destination poll group's thread: add the qpair to the
 * group, disconnecting it if the add fails. */
static void
_nvmf_poll_group_add(void *_ctx)
{
	struct nvmf_new_qpair_ctx *ctx = _ctx;
	struct spdk_nvmf_qpair *qpair = ctx->qpair;
	struct spdk_nvmf_poll_group *group = ctx->group;

	free(_ctx);

	if (spdk_nvmf_poll_group_add(group, qpair) != 0) {
		SPDK_ERRLOG("Unable to add the qpair to a poll group.\n");
		spdk_nvmf_qpair_disconnect(qpair);
	}
}
1201
/*
 * Assign a newly accepted qpair to a poll group and hand it off to that
 * group's thread.  Prefers the transport's notion of an optimal group;
 * otherwise falls back to round-robin over tgt->poll_groups.  The qpair
 * is disconnected on any failure.
 */
void
spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_poll_group *group;
	struct nvmf_new_qpair_ctx *ctx;

	group = spdk_nvmf_get_optimal_poll_group(qpair);
	if (group == NULL) {
		if (tgt->next_poll_group == NULL) {
			/* Wrap the round-robin cursor back to the list head. */
			tgt->next_poll_group = TAILQ_FIRST(&tgt->poll_groups);
			if (tgt->next_poll_group == NULL) {
				SPDK_ERRLOG("No poll groups exist.\n");
				spdk_nvmf_qpair_disconnect(qpair);
				return;
			}
		}
		group = tgt->next_poll_group;
		tgt->next_poll_group = TAILQ_NEXT(group, link);
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Unable to send message to poll group.\n");
		spdk_nvmf_qpair_disconnect(qpair);
		return;
	}

	ctx->qpair = qpair;
	ctx->group = group;

	pthread_mutex_lock(&group->mutex);
	group->current_unassociated_qpairs++;
	pthread_mutex_unlock(&group->mutex);

	spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx);
}
1238
/* Create (or get a reference to) the calling thread's poll group for the
 * target by obtaining the target's I/O channel. */
struct spdk_nvmf_poll_group *
spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt)
{
	struct spdk_io_channel *ch;

	ch = spdk_get_io_channel(tgt);
	if (!ch) {
		SPDK_ERRLOG("Unable to get I/O channel for target\n");
		return NULL;
	}

	return spdk_io_channel_get_ctx(ch);
}
1252
/* Asynchronously destroy a poll group; cb_fn fires once all of the
 * group's qpairs are torn down.  Must not be called while a previous
 * destroy is pending (asserted). */
void
spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group,
			     spdk_nvmf_poll_group_destroy_done_fn cb_fn,
			     void *cb_arg)
{
	assert(group->destroy_cb_fn == NULL);
	group->destroy_cb_fn = cb_fn;
	group->destroy_cb_arg = cb_arg;

	/* This function will put the io_channel associated with this poll group */
	nvmf_tgt_destroy_poll_group_qpairs(group);
}
1265
/*
 * Attach a qpair to a poll group.  The qpair is linked into the group's
 * list and moved to the CONNECTING state only after the transport-level
 * add succeeds.  Returns 0 on success or a negative value on error.
 */
int
spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
			 struct spdk_nvmf_qpair *qpair)
{
	int rc;
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_INIT(&qpair->outstanding);
	qpair->group = group;
	qpair->ctrlr = NULL;
	qpair->disconnect_started = false;

	tgroup = nvmf_get_transport_poll_group(group, qpair->transport);
	if (tgroup == NULL) {
		return -1;
	}

	rc = nvmf_transport_poll_group_add(tgroup, qpair);

	/* We add the qpair to the group only it is successfully added into the tgroup */
	if (rc == 0) {
		SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_add_qpair, qpair, spdk_thread_get_id(group->thread));
		TAILQ_INSERT_TAIL(&group->qpairs, qpair, link);
		nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_CONNECTING);
	}

	return rc;
}
1294
/* Thread-hop trampoline: destroy the controller on the thread the
 * message was sent to. */
static void
_nvmf_ctrlr_destruct(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;

	nvmf_ctrlr_destruct(ctrlr);
}
1302
/* Runs on the controller's thread: clear this qpair's bit in the
 * controller's qpair mask and, once the last qpair is gone, forward
 * controller destruction to the subsystem's thread. */
static void
_nvmf_ctrlr_free_from_qpair(void *ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
	struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr;
	uint32_t count;

	spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid);
	SPDK_DEBUGLOG(nvmf, "qpair_mask cleared, qid %u\n", qpair_ctx->qid);
	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
	if (count == 0) {
		assert(!ctrlr->in_destruct);
		SPDK_DEBUGLOG(nvmf, "Last qpair %u, destroy ctrlr 0x%hx\n", qpair_ctx->qid, ctrlr->cntlid);
		ctrlr->in_destruct = true;
		spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr);
	}
	free(qpair_ctx);
}
1321
/* Called when the transport has finished tearing down a qpair.  Clears
 * the controller's admin-qpair pointer when qid 0 goes away, then hands
 * the per-controller bookkeeping to the controller's thread. */
static void
_nvmf_transport_qpair_fini_complete(void *cb_ctx)
{
	struct nvmf_qpair_disconnect_ctx *qpair_ctx = cb_ctx;
	struct spdk_nvmf_ctrlr *ctrlr;

	ctrlr = qpair_ctx->ctrlr;
	SPDK_DEBUGLOG(nvmf, "Finish destroying qid %u\n", qpair_ctx->qid);

	if (ctrlr) {
		if (qpair_ctx->qid == 0) {
			/* Admin qpair is removed, so set the pointer to NULL.
			 * This operation is safe since we are on ctrlr thread now, admin qpair's thread is the same
			 * as controller's thread */
			assert(ctrlr->thread == spdk_get_thread());
			ctrlr->admin_qpair = NULL;
		}
		/* Free qpair id from controller's bit mask and destroy the controller if it is the last qpair */
		if (ctrlr->thread) {
			spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_free_from_qpair, qpair_ctx);
		} else {
			_nvmf_ctrlr_free_from_qpair(qpair_ctx);
		}
	} else {
		free(qpair_ctx);
	}
}
1349
/*
 * Detach a qpair from its poll group: move it to the ERROR state, remove
 * it from the transport poll group (ENOTSUP from transports without
 * per-qpair removal is tolerated), and unlink it from the group's list.
 */
void
spdk_nvmf_poll_group_remove(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_transport_poll_group *tgroup;
	int rc;

	SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_remove_qpair, qpair,
				 spdk_thread_get_id(qpair->group->thread));
	nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ERROR);

	/* Find the tgroup and remove the qpair from the tgroup */
	tgroup = nvmf_get_transport_poll_group(qpair->group, qpair->transport);
	if (tgroup != NULL) {
		rc = nvmf_transport_poll_group_remove(tgroup, qpair);
		if (rc && (rc != ENOTSUP)) {
			SPDK_ERRLOG("Cannot remove qpair=%p from transport group=%p\n",
				    qpair, tgroup);
		}
	}

	TAILQ_REMOVE(&qpair->group->qpairs, qpair, link);
	qpair->group = NULL;
}
1373
1374static void
1375_nvmf_qpair_sgroup_req_clean(struct spdk_nvmf_subsystem_poll_group *sgroup,
1376 const struct spdk_nvmf_qpair *qpair)
1377{
1378 struct spdk_nvmf_request *req, *tmp;
1379 TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp)for ((req) = (((&sgroup->queued))->tqh_first); (req
) && ((tmp) = (((req))->link.tqe_next), 1); (req) =
(tmp))
{
1380 if (req->qpair == qpair) {
1381 TAILQ_REMOVE(&sgroup->queued, req, link)do { __typeof__(req) _elm; if (((req)->link.tqe_next) != (
(void*)0)) (req)->link.tqe_next->link.tqe_prev = (req)->
link.tqe_prev; else (&sgroup->queued)->tqh_last = (
req)->link.tqe_prev; *(req)->link.tqe_prev = (req)->
link.tqe_next; for ((_elm) = ((&sgroup->queued)->tqh_first
); (_elm); (_elm) = ((_elm)->link.tqe_next)) { ((void) sizeof
((_elm != req) ? 1 : 0), __extension__ ({ if (_elm != req) ;
else __assert_fail ("_elm != req", "nvmf.c", 1381, __extension__
__PRETTY_FUNCTION__); })); } } while (0)
;
1382 if (nvmf_transport_req_free(req)) {
1383 SPDK_ERRLOG("Transport request free error!\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 1383, __func__, "Transport request free error!\n"
)
;
1384 }
1385 }
1386 }
1387}
1388
1389static void
1390_nvmf_qpair_destroy(void *ctx, int status)
1391{
1392 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
1393 struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair;
1394 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
1395 struct spdk_nvmf_subsystem_poll_group *sgroup;
1396 uint32_t sid;
1397
1398 assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING)((void) sizeof ((qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING
) ? 1 : 0), __extension__ ({ if (qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING
) ; else __assert_fail ("qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING"
, "nvmf.c", 1398, __extension__ __PRETTY_FUNCTION__); }))
;
1399 qpair_ctx->qid = qpair->qid;
1400
1401 if (qpair->connect_received) {
1402 if (0 == qpair->qid) {
1403 assert(qpair->group->stat.current_admin_qpairs > 0)((void) sizeof ((qpair->group->stat.current_admin_qpairs
> 0) ? 1 : 0), __extension__ ({ if (qpair->group->stat
.current_admin_qpairs > 0) ; else __assert_fail ("qpair->group->stat.current_admin_qpairs > 0"
, "nvmf.c", 1403, __extension__ __PRETTY_FUNCTION__); }))
;
1404 qpair->group->stat.current_admin_qpairs--;
1405 } else {
1406 assert(qpair->group->stat.current_io_qpairs > 0)((void) sizeof ((qpair->group->stat.current_io_qpairs >
0) ? 1 : 0), __extension__ ({ if (qpair->group->stat.current_io_qpairs
> 0) ; else __assert_fail ("qpair->group->stat.current_io_qpairs > 0"
, "nvmf.c", 1406, __extension__ __PRETTY_FUNCTION__); }))
;
1407 qpair->group->stat.current_io_qpairs--;
1408 }
1409 } else {
1410 pthread_mutex_lock(&qpair->group->mutex);
1411 qpair->group->current_unassociated_qpairs--;
1412 pthread_mutex_unlock(&qpair->group->mutex);
1413 }
1414
1415 if (ctrlr) {
1416 sgroup = &qpair->group->sgroups[ctrlr->subsys->id];
1417 _nvmf_qpair_sgroup_req_clean(sgroup, qpair);
1418 } else {
1419 for (sid = 0; sid < qpair->group->num_sgroups; sid++) {
1420 sgroup = &qpair->group->sgroups[sid];
1421 assert(sgroup != NULL)((void) sizeof ((sgroup != ((void*)0)) ? 1 : 0), __extension__
({ if (sgroup != ((void*)0)) ; else __assert_fail ("sgroup != NULL"
, "nvmf.c", 1421, __extension__ __PRETTY_FUNCTION__); }))
;
1422 _nvmf_qpair_sgroup_req_clean(sgroup, qpair);
1423 }
1424 }
1425
1426 nvmf_qpair_auth_destroy(qpair);
1427 qpair_ctx->ctrlr = ctrlr;
1428 spdk_nvmf_poll_group_remove(qpair);
1429 nvmf_transport_qpair_fini(qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx);
1430}
1431
1432static void
1433_nvmf_qpair_disconnect_msg(void *ctx)
1434{
1435 struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx;
1436
1437 spdk_nvmf_qpair_disconnect(qpair_ctx->qpair);
1438 free(ctx);
1439}
1440
1441int
1442spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair)
1443{
1444 struct spdk_nvmf_poll_group *group = qpair->group;
1445 struct nvmf_qpair_disconnect_ctx *qpair_ctx;
1446
1447 if (__atomic_test_and_set(&qpair->disconnect_started, __ATOMIC_RELAXED0)) {
1448 return -EINPROGRESS115;
1449 }
1450
1451 /* If we get a qpair in the uninitialized state, we can just destroy it immediately */
1452 if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) {
1453 nvmf_transport_qpair_fini(qpair, NULL((void*)0), NULL((void*)0));
1454 return 0;
1455 }
1456
1457 assert(group != NULL)((void) sizeof ((group != ((void*)0)) ? 1 : 0), __extension__
({ if (group != ((void*)0)) ; else __assert_fail ("group != NULL"
, "nvmf.c", 1457, __extension__ __PRETTY_FUNCTION__); }))
;
1458 if (spdk_get_thread() != group->thread) {
1459 /* clear the atomic so we can set it on the next call on the proper thread. */
1460 __atomic_clear(&qpair->disconnect_started, __ATOMIC_RELAXED0);
1461 qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
1462 if (!qpair_ctx) {
1463 SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 1463, __func__, "Unable to allocate context for nvmf_qpair_disconnect\n"
)
;
1464 return -ENOMEM12;
1465 }
1466 qpair_ctx->qpair = qpair;
1467 spdk_thread_send_msg(group->thread, _nvmf_qpair_disconnect_msg, qpair_ctx);
1468 return 0;
1469 }
1470
1471 SPDK_DTRACE_PROBE2_TICKS(nvmf_qpair_disconnect, qpair, spdk_thread_get_id(group->thread));
1472 assert(spdk_nvmf_qpair_is_active(qpair))((void) sizeof ((spdk_nvmf_qpair_is_active(qpair)) ? 1 : 0), __extension__
({ if (spdk_nvmf_qpair_is_active(qpair)) ; else __assert_fail
("spdk_nvmf_qpair_is_active(qpair)", "nvmf.c", 1472, __extension__
__PRETTY_FUNCTION__); }))
;
1473 nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING);
1474
1475 qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
1476 if (!qpair_ctx) {
1477 SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 1477, __func__, "Unable to allocate context for nvmf_qpair_disconnect\n"
)
;
1478 return -ENOMEM12;
1479 }
1480
1481 qpair_ctx->qpair = qpair;
1482
1483 /* Check for outstanding I/O */
1484 if (!TAILQ_EMPTY(&qpair->outstanding)((&qpair->outstanding)->tqh_first == ((void*)0))) {
1485 SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_drain_qpair, qpair, spdk_thread_get_id(group->thread));
1486 qpair->state_cb = _nvmf_qpair_destroy;
1487 qpair->state_cb_arg = qpair_ctx;
1488 nvmf_qpair_abort_pending_zcopy_reqs(qpair);
1489 nvmf_qpair_free_aer(qpair);
1490 return 0;
1491 }
1492
1493 _nvmf_qpair_destroy(qpair_ctx, 0);
1494
1495 return 0;
1496}
1497
1498int
1499spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
1500 struct spdk_nvme_transport_id *trid)
1501{
1502 memset(trid, 0, sizeof(*trid));
1503 return nvmf_transport_qpair_get_peer_trid(qpair, trid);
1504}
1505
1506int
1507spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
1508 struct spdk_nvme_transport_id *trid)
1509{
1510 memset(trid, 0, sizeof(*trid));
1511 return nvmf_transport_qpair_get_local_trid(qpair, trid);
1512}
1513
1514int
1515spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
1516 struct spdk_nvme_transport_id *trid)
1517{
1518 memset(trid, 0, sizeof(*trid));
1519 return nvmf_transport_qpair_get_listen_trid(qpair, trid);
1520}
1521
1522static int
1523poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
1524 struct spdk_nvmf_subsystem *subsystem)
1525{
1526 struct spdk_nvmf_subsystem_poll_group *sgroup;
1527 uint32_t i, j;
1528 struct spdk_nvmf_ns *ns;
1529 struct spdk_nvmf_registrant *reg, *tmp;
1530 struct spdk_io_channel *ch;
1531 struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
1532 struct spdk_nvmf_ctrlr *ctrlr;
1533 bool_Bool ns_changed;
1534
1535 /* Make sure our poll group has memory for this subsystem allocated */
1536 if (subsystem->id >= group->num_sgroups) {
1537 return -ENOMEM12;
1538 }
1539
1540 sgroup = &group->sgroups[subsystem->id];
1541
1542 /* Make sure the array of namespace information is the correct size */
1543 if (sgroup->num_ns == 0 && subsystem->max_nsid > 0) {
1544 /* First allocation */
1545 sgroup->ns_info = calloc(subsystem->max_nsid, sizeof(struct spdk_nvmf_subsystem_pg_ns_info));
1546 if (!sgroup->ns_info) {
1547 return -ENOMEM12;
1548 }
1549 sgroup->num_ns = subsystem->max_nsid;
1550 }
1551
1552 ns_changed = false0;
1553
1554 /* Detect bdevs that were added or removed */
1555 for (i = 0; i < sgroup->num_ns; i++) {
1556 ns = subsystem->ns[i];
1557 ns_info = &sgroup->ns_info[i];
1558 ch = ns_info->channel;
1559
1560 if (ns == NULL((void*)0) && ch == NULL((void*)0)) {
1561 /* Both NULL. Leave empty */
1562 } else if (ns == NULL((void*)0) && ch != NULL((void*)0)) {
1563 /* There was a channel here, but the namespace is gone. */
1564 ns_changed = true1;
1565 spdk_put_io_channel(ch);
1566 ns_info->channel = NULL((void*)0);
1567 } else if (ns != NULL((void*)0) && ch == NULL((void*)0)) {
1568 /* A namespace appeared but there is no channel yet */
1569 ns_changed = true1;
1570 ch = spdk_bdev_get_io_channel(ns->desc);
1571 if (ch == NULL((void*)0)) {
1572 SPDK_ERRLOG("Could not allocate I/O channel.\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 1572, __func__, "Could not allocate I/O channel.\n"
)
;
1573 return -ENOMEM12;
1574 }
1575 ns_info->channel = ch;
1576 } else if (spdk_uuid_compare(&ns_info->uuid, spdk_bdev_get_uuid(ns->bdev)) != 0) {
1577 /* A namespace was here before, but was replaced by a new one. */
1578 ns_changed = true1;
1579 spdk_put_io_channel(ns_info->channel);
1580 memset(ns_info, 0, sizeof(*ns_info));
1581
1582 ch = spdk_bdev_get_io_channel(ns->desc);
1583 if (ch == NULL((void*)0)) {
1584 SPDK_ERRLOG("Could not allocate I/O channel.\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 1584, __func__, "Could not allocate I/O channel.\n"
)
;
1585 return -ENOMEM12;
1586 }
1587 ns_info->channel = ch;
1588 } else if (ns_info->num_blocks != spdk_bdev_get_num_blocks(ns->bdev)) {
1589 /* Namespace is still there but size has changed */
1590 SPDK_DEBUGLOG(nvmf, "Namespace resized: subsystem_id %u,"do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1596, __func__
, "Namespace resized: subsystem_id %u," " nsid %u, pg %p, old %"
"l" "u" ", new %" "l" "u" "\n", subsystem->id, ns->nsid
, group, ns_info->num_blocks, spdk_bdev_get_num_blocks(ns->
bdev)); } } while (0)
1591 " nsid %u, pg %p, old %" PRIu64 ", new %" PRIu64 "\n",do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1596, __func__
, "Namespace resized: subsystem_id %u," " nsid %u, pg %p, old %"
"l" "u" ", new %" "l" "u" "\n", subsystem->id, ns->nsid
, group, ns_info->num_blocks, spdk_bdev_get_num_blocks(ns->
bdev)); } } while (0)
1592 subsystem->id,do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1596, __func__
, "Namespace resized: subsystem_id %u," " nsid %u, pg %p, old %"
"l" "u" ", new %" "l" "u" "\n", subsystem->id, ns->nsid
, group, ns_info->num_blocks, spdk_bdev_get_num_blocks(ns->
bdev)); } } while (0)
1593 ns->nsid,do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1596, __func__
, "Namespace resized: subsystem_id %u," " nsid %u, pg %p, old %"
"l" "u" ", new %" "l" "u" "\n", subsystem->id, ns->nsid
, group, ns_info->num_blocks, spdk_bdev_get_num_blocks(ns->
bdev)); } } while (0)
1594 group,do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1596, __func__
, "Namespace resized: subsystem_id %u," " nsid %u, pg %p, old %"
"l" "u" ", new %" "l" "u" "\n", subsystem->id, ns->nsid
, group, ns_info->num_blocks, spdk_bdev_get_num_blocks(ns->
bdev)); } } while (0)
1595 ns_info->num_blocks,do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1596, __func__
, "Namespace resized: subsystem_id %u," " nsid %u, pg %p, old %"
"l" "u" ", new %" "l" "u" "\n", subsystem->id, ns->nsid
, group, ns_info->num_blocks, spdk_bdev_get_num_blocks(ns->
bdev)); } } while (0)
1596 spdk_bdev_get_num_blocks(ns->bdev))do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1596, __func__
, "Namespace resized: subsystem_id %u," " nsid %u, pg %p, old %"
"l" "u" ", new %" "l" "u" "\n", subsystem->id, ns->nsid
, group, ns_info->num_blocks, spdk_bdev_get_num_blocks(ns->
bdev)); } } while (0)
;
1597 ns_changed = true1;
1598 } else if (ns_info->anagrpid != ns->anagrpid) {
1599 /* Namespace is still there but ANA group ID has changed */
1600 SPDK_DEBUGLOG(nvmf, "ANA group ID changed: subsystem_id %u,"do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1606, __func__
, "ANA group ID changed: subsystem_id %u," "nsid %u, pg %p, old %u, new %u\n"
, subsystem->id, ns->nsid, group, ns_info->anagrpid,
ns->anagrpid); } } while (0)
1601 "nsid %u, pg %p, old %u, new %u\n",do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1606, __func__
, "ANA group ID changed: subsystem_id %u," "nsid %u, pg %p, old %u, new %u\n"
, subsystem->id, ns->nsid, group, ns_info->anagrpid,
ns->anagrpid); } } while (0)
1602 subsystem->id,do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1606, __func__
, "ANA group ID changed: subsystem_id %u," "nsid %u, pg %p, old %u, new %u\n"
, subsystem->id, ns->nsid, group, ns_info->anagrpid,
ns->anagrpid); } } while (0)
1603 ns->nsid,do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1606, __func__
, "ANA group ID changed: subsystem_id %u," "nsid %u, pg %p, old %u, new %u\n"
, subsystem->id, ns->nsid, group, ns_info->anagrpid,
ns->anagrpid); } } while (0)
1604 group,do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1606, __func__
, "ANA group ID changed: subsystem_id %u," "nsid %u, pg %p, old %u, new %u\n"
, subsystem->id, ns->nsid, group, ns_info->anagrpid,
ns->anagrpid); } } while (0)
1605 ns_info->anagrpid,do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1606, __func__
, "ANA group ID changed: subsystem_id %u," "nsid %u, pg %p, old %u, new %u\n"
, subsystem->id, ns->nsid, group, ns_info->anagrpid,
ns->anagrpid); } } while (0)
1606 ns->anagrpid)do { extern struct spdk_log_flag SPDK_LOG_nvmf; if (SPDK_LOG_nvmf
.enabled) { spdk_log(SPDK_LOG_DEBUG, "nvmf.c", 1606, __func__
, "ANA group ID changed: subsystem_id %u," "nsid %u, pg %p, old %u, new %u\n"
, subsystem->id, ns->nsid, group, ns_info->anagrpid,
ns->anagrpid); } } while (0)
;
1607 ns_changed = true1;
1608 }
1609
1610 if (ns == NULL((void*)0)) {
1611 memset(ns_info, 0, sizeof(*ns_info));
1612 } else {
1613 ns_info->uuid = *spdk_bdev_get_uuid(ns->bdev);
1614 ns_info->num_blocks = spdk_bdev_get_num_blocks(ns->bdev);
1615 ns_info->anagrpid = ns->anagrpid;
1616 ns_info->crkey = ns->crkey;
1617 ns_info->rtype = ns->rtype;
1618 if (ns->holder) {
1619 ns_info->holder_id = ns->holder->hostid;
1620 }
1621
1622 memset(&ns_info->reg_hostid, 0, SPDK_NVMF_MAX_NUM_REGISTRANTS16 * sizeof(struct spdk_uuid));
1623 j = 0;
1624 TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp)for ((reg) = (((&ns->registrants))->tqh_first); (reg
) && ((tmp) = (((reg))->link.tqe_next), 1); (reg) =
(tmp))
{
1625 if (j >= SPDK_NVMF_MAX_NUM_REGISTRANTS16) {
1626 SPDK_ERRLOG("Maximum %u registrants can support.\n", SPDK_NVMF_MAX_NUM_REGISTRANTS)spdk_log(SPDK_LOG_ERROR, "nvmf.c", 1626, __func__, "Maximum %u registrants can support.\n"
, 16)
;
1627 return -EINVAL22;
1628 }
1629 ns_info->reg_hostid[j++] = reg->hostid;
1630 }
1631 }
1632 }
1633
1634 if (ns_changed) {
1635 TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link)for ((ctrlr) = ((&subsystem->ctrlrs)->tqh_first); (
ctrlr); (ctrlr) = ((ctrlr)->link.tqe_next))
{
1636 if (ctrlr->thread != spdk_get_thread()) {
1637 continue;
1638 }
1639 /* It is possible that a ctrlr was added but the admin_qpair hasn't been
1640 * assigned yet.
1641 */
1642 if (!ctrlr->admin_qpair) {
1643 continue;
1644 }
1645 if (ctrlr->admin_qpair->group == group) {
1646 nvmf_ctrlr_async_event_ns_notice(ctrlr);
1647 nvmf_ctrlr_async_event_ana_change_notice(ctrlr);
1648 }
1649 }
1650 }
1651
1652 return 0;
1653}
1654
/* Library-internal entry point; simply forwards to the static helper. */
int
nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				 struct spdk_nvmf_subsystem *subsystem)
{
	return poll_group_update_subsystem(group, subsystem);
}
1661
1662int
1663nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
1664 struct spdk_nvmf_subsystem *subsystem,
1665 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
1666{
1667 int rc = 0;
1668 struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id];
1669 struct spdk_nvmf_request *req, *tmp;
1670 uint32_t i;
1671
1672 if (!TAILQ_EMPTY(&sgroup->queued)((&sgroup->queued)->tqh_first == ((void*)0))) {
1673 SPDK_ERRLOG("sgroup->queued not empty when adding subsystem\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 1673, __func__, "sgroup->queued not empty when adding subsystem\n"
)
;
1674 TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp)for ((req) = (((&sgroup->queued))->tqh_first); (req
) && ((tmp) = (((req))->link.tqe_next), 1); (req) =
(tmp))
{
1675 TAILQ_REMOVE(&sgroup->queued, req, link)do { __typeof__(req) _elm; if (((req)->link.tqe_next) != (
(void*)0)) (req)->link.tqe_next->link.tqe_prev = (req)->
link.tqe_prev; else (&sgroup->queued)->tqh_last = (
req)->link.tqe_prev; *(req)->link.tqe_prev = (req)->
link.tqe_next; for ((_elm) = ((&sgroup->queued)->tqh_first
); (_elm); (_elm) = ((_elm)->link.tqe_next)) { ((void) sizeof
((_elm != req) ? 1 : 0), __extension__ ({ if (_elm != req) ;
else __assert_fail ("_elm != req", "nvmf.c", 1675, __extension__
__PRETTY_FUNCTION__); })); } } while (0)
;
1676 if (nvmf_transport_req_free(req)) {
1677 SPDK_ERRLOG("Transport request free error!\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 1677, __func__, "Transport request free error!\n"
)
;
1678 }
1679 }
1680 }
1681
1682 rc = poll_group_update_subsystem(group, subsystem);
1683 if (rc) {
1684 nvmf_poll_group_remove_subsystem(group, subsystem, NULL((void*)0), NULL((void*)0));
1685 goto fini;
1686 }
1687
1688 sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1689
1690 for (i = 0; i < sgroup->num_ns; i++) {
1691 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1692 }
1693
1694fini:
1695 if (cb_fn) {
1696 cb_fn(cb_arg, rc);
1697 }
1698
1699 SPDK_DTRACE_PROBE2_TICKS(nvmf_poll_group_add_subsystem, spdk_thread_get_id(group->thread),
1700 subsystem->subnqn);
1701
1702 return rc;
1703}
1704
1705static void
1706_nvmf_poll_group_remove_subsystem_cb(void *ctx, int status)
1707{
1708 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
1709 struct spdk_nvmf_subsystem *subsystem;
1710 struct spdk_nvmf_poll_group *group;
1711 struct spdk_nvmf_subsystem_poll_group *sgroup;
1712 spdk_nvmf_poll_group_mod_done cpl_fn = NULL((void*)0);
1713 void *cpl_ctx = NULL((void*)0);
1714 uint32_t nsid;
1715
1716 group = qpair_ctx->group;
1717 subsystem = qpair_ctx->subsystem;
1718 cpl_fn = qpair_ctx->cpl_fn;
1719 cpl_ctx = qpair_ctx->cpl_ctx;
1720 sgroup = &group->sgroups[subsystem->id];
1721
1722 if (status) {
1723 goto fini;
1724 }
1725
1726 for (nsid = 0; nsid < sgroup->num_ns; nsid++) {
1727 if (sgroup->ns_info[nsid].channel) {
1728 spdk_put_io_channel(sgroup->ns_info[nsid].channel);
1729 sgroup->ns_info[nsid].channel = NULL((void*)0);
1730 }
1731 }
1732
1733 sgroup->num_ns = 0;
1734 free(sgroup->ns_info);
1735 sgroup->ns_info = NULL((void*)0);
1736fini:
1737 free(qpair_ctx);
1738 if (cpl_fn) {
1739 cpl_fn(cpl_ctx, status);
1740 }
1741}
1742
1743static void nvmf_poll_group_remove_subsystem_msg(void *ctx);
1744
1745static void
1746nvmf_poll_group_remove_subsystem_msg(void *ctx)
1747{
1748 struct spdk_nvmf_qpair *qpair, *qpair_tmp;
1749 struct spdk_nvmf_subsystem *subsystem;
1750 struct spdk_nvmf_poll_group *group;
1751 struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx;
1752 bool_Bool qpairs_found = false0;
1753 int rc = 0;
1754
1755 group = qpair_ctx->group;
1756 subsystem = qpair_ctx->subsystem;
1757
1758 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qpair_tmp)for ((qpair) = (((&group->qpairs))->tqh_first); (qpair
) && ((qpair_tmp) = (((qpair))->link.tqe_next), 1)
; (qpair) = (qpair_tmp))
{
1759 if ((qpair->ctrlr != NULL((void*)0)) && (qpair->ctrlr->subsys == subsystem)) {
1760 qpairs_found = true1;
1761 rc = spdk_nvmf_qpair_disconnect(qpair);
1762 if (rc && rc != -EINPROGRESS115) {
1763 break;
1764 }
1765 }
1766 }
1767
1768 if (!qpairs_found) {
1769 _nvmf_poll_group_remove_subsystem_cb(ctx, 0);
1770 return;
1771 }
1772
1773 /* Some qpairs are in process of being disconnected. Send a message and try to remove them again */
1774 spdk_thread_send_msg(spdk_get_thread(), nvmf_poll_group_remove_subsystem_msg, ctx);
1775}
1776
1777void
1778nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
1779 struct spdk_nvmf_subsystem *subsystem,
1780 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
1781{
1782 struct spdk_nvmf_subsystem_poll_group *sgroup;
1783 struct nvmf_qpair_disconnect_many_ctx *ctx;
1784 uint32_t i;
1785
1786 SPDK_DTRACE_PROBE3_TICKS(nvmf_poll_group_remove_subsystem, group, spdk_thread_get_id(group->thread),
1787 subsystem->subnqn);
1788
1789 ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx));
1790 if (!ctx) {
1791 SPDK_ERRLOG("Unable to allocate memory for context to remove poll subsystem\n")spdk_log(SPDK_LOG_ERROR, "nvmf.c", 1791, __func__, "Unable to allocate memory for context to remove poll subsystem\n"
)
;
1792 if (cb_fn) {
1793 cb_fn(cb_arg, -1);
1794 }
1795 return;
1796 }
1797
1798 ctx->group = group;
1799 ctx->subsystem = subsystem;
1800 ctx->cpl_fn = cb_fn;
1801 ctx->cpl_ctx = cb_arg;
1802
1803 sgroup = &group->sgroups[subsystem->id];
1804 sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
1805
1806 for (i = 0; i < sgroup->num_ns; i++) {
1807 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
1808 }
1809
1810 nvmf_poll_group_remove_subsystem_msg(ctx);
1811}
1812
1813void
1814nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
1815 struct spdk_nvmf_subsystem *subsystem,
1816 uint32_t nsid,
1817 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
1818{
1819 struct spdk_nvmf_subsystem_poll_group *sgroup;
1820 struct spdk_nvmf_subsystem_pg_ns_info *ns_info = NULL((void*)0);
1821 int rc = 0;
1822 uint32_t i;
1823
1824 if (subsystem->id >= group->num_sgroups) {
1825 rc = -1;
1826 goto fini;
1827 }
1828
1829 sgroup = &group->sgroups[subsystem->id];
1830 if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED) {
1831 goto fini;
1832 }
1833 sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
1834
1835 if (nsid == SPDK_NVME_GLOBAL_NS_TAG((uint32_t)0xFFFFFFFF)) {
1836 for (i = 0; i < sgroup->num_ns; i++) {
1837 ns_info = &sgroup->ns_info[i];
1838 ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
1839 }
1840 } else {
1841 /* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
1842 if (nsid - 1 < sgroup->num_ns) {
1843 ns_info = &sgroup->ns_info[nsid - 1];
1844 ns_info->state = SPDK_NVMF_SUBSYSTEM_PAUSING;
1845 }
1846 }
1847
1848 if (sgroup->mgmt_io_outstanding > 0) {
1849 assert(sgroup->cb_fn == NULL)((void) sizeof ((sgroup->cb_fn == ((void*)0)) ? 1 : 0), __extension__
({ if (sgroup->cb_fn == ((void*)0)) ; else __assert_fail (
"sgroup->cb_fn == NULL", "nvmf.c", 1849, __extension__ __PRETTY_FUNCTION__
); }))
;
1850 sgroup->cb_fn = cb_fn;
1851 assert(sgroup->cb_arg == NULL)((void) sizeof ((sgroup->cb_arg == ((void*)0)) ? 1 : 0), __extension__
({ if (sgroup->cb_arg == ((void*)0)) ; else __assert_fail
("sgroup->cb_arg == NULL", "nvmf.c", 1851, __extension__ __PRETTY_FUNCTION__
); }))
;
1852 sgroup->cb_arg = cb_arg;
1853 return;
1854 }
1855
1856 if (nsid == SPDK_NVME_GLOBAL_NS_TAG((uint32_t)0xFFFFFFFF)) {
1857 for (i = 0; i < sgroup->num_ns; i++) {
1858 ns_info = &sgroup->ns_info[i];
1859
1860 if (ns_info->io_outstanding > 0) {
1861 assert(sgroup->cb_fn == NULL)((void) sizeof ((sgroup->cb_fn == ((void*)0)) ? 1 : 0), __extension__
({ if (sgroup->cb_fn == ((void*)0)) ; else __assert_fail (
"sgroup->cb_fn == NULL", "nvmf.c", 1861, __extension__ __PRETTY_FUNCTION__
); }))
;
1862 sgroup->cb_fn = cb_fn;
1863 assert(sgroup->cb_arg == NULL)((void) sizeof ((sgroup->cb_arg == ((void*)0)) ? 1 : 0), __extension__
({ if (sgroup->cb_arg == ((void*)0)) ; else __assert_fail
("sgroup->cb_arg == NULL", "nvmf.c", 1863, __extension__ __PRETTY_FUNCTION__
); }))
;
1864 sgroup->cb_arg = cb_arg;
1865 return;
1866 }
1867 }
1868 } else {
1869 if (ns_info != NULL((void*)0) && ns_info->io_outstanding > 0) {
1870 assert(sgroup->cb_fn == NULL)((void) sizeof ((sgroup->cb_fn == ((void*)0)) ? 1 : 0), __extension__
({ if (sgroup->cb_fn == ((void*)0)) ; else __assert_fail (
"sgroup->cb_fn == NULL", "nvmf.c", 1870, __extension__ __PRETTY_FUNCTION__
); }))
;
1871 sgroup->cb_fn = cb_fn;
1872 assert(sgroup->cb_arg == NULL)((void) sizeof ((sgroup->cb_arg == ((void*)0)) ? 1 : 0), __extension__
({ if (sgroup->cb_arg == ((void*)0)) ; else __assert_fail
("sgroup->cb_arg == NULL", "nvmf.c", 1872, __extension__ __PRETTY_FUNCTION__
); }))
;
1873 sgroup->cb_arg = cb_arg;
1874 return;
1875 }
1876 }
1877
1878 assert(sgroup->mgmt_io_outstanding == 0)((void) sizeof ((sgroup->mgmt_io_outstanding == 0) ? 1 : 0
), __extension__ ({ if (sgroup->mgmt_io_outstanding == 0) ;
else __assert_fail ("sgroup->mgmt_io_outstanding == 0", "nvmf.c"
, 1878, __extension__ __PRETTY_FUNCTION__); }))
;
1879 sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
1880fini:
1881 if (cb_fn) {
1882 cb_fn(cb_arg, rc);
1883 }
1884}
1885
1886void
1887nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
1888 struct spdk_nvmf_subsystem *subsystem,
1889 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
1890{
1891 struct spdk_nvmf_request *req, *tmp;
1892 struct spdk_nvmf_subsystem_poll_group *sgroup;
1893 int rc = 0;
1894 uint32_t i;
1895
1896 if (subsystem->id >= group->num_sgroups) {
1897 rc = -1;
1898 goto fini;
1899 }
1900
1901 sgroup = &group->sgroups[subsystem->id];
1902
1903 if (sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE) {
1904 goto fini;
1905 }
1906
1907 rc = poll_group_update_subsystem(group, subsystem);
1908 if (rc) {
1909 goto fini;
1910 }
1911
1912 for (i = 0; i < sgroup->num_ns; i++) {
1913 sgroup->ns_info[i].state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1914 }
1915
1916 sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
1917
1918 /* Release all queued requests */
1919 TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp)for ((req) = (((&sgroup->queued))->tqh_first); (req
) && ((tmp) = (((req))->link.tqe_next), 1); (req) =
(tmp))
{
1920 TAILQ_REMOVE(&sgroup->queued, req, link)do { __typeof__(req) _elm; if (((req)->link.tqe_next) != (
(void*)0)) (req)->link.tqe_next->link.tqe_prev = (req)->
link.tqe_prev; else (&sgroup->queued)->tqh_last = (
req)->link.tqe_prev; *(req)->link.tqe_prev = (req)->
link.tqe_next; for ((_elm) = ((&sgroup->queued)->tqh_first
); (_elm); (_elm) = ((_elm)->link.tqe_next)) { ((void) sizeof
((_elm != req) ? 1 : 0), __extension__ ({ if (_elm != req) ;
else __assert_fail ("_elm != req", "nvmf.c", 1920, __extension__
__PRETTY_FUNCTION__); })); } } while (0)
;
1921 if (spdk_nvmf_request_using_zcopy(req)) {
1922 spdk_nvmf_request_zcopy_start(req);
1923 } else {
1924 spdk_nvmf_request_exec(req);
1925 }
1926
1927 }
1928fini:
1929 if (cb_fn) {
1930 cb_fn(cb_arg, rc);
1931 }
1932}
1933
1934
1935struct spdk_nvmf_poll_group *
1936spdk_nvmf_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
1937{
1938 struct spdk_nvmf_transport_poll_group *tgroup;
1939
1940 tgroup = nvmf_transport_get_optimal_poll_group(qpair->transport, qpair);
1941
1942 if (tgroup == NULL((void*)0)) {
1943 return NULL((void*)0);
1944 }
1945
1946 return tgroup->group;
1947}
1948
1949void
1950spdk_nvmf_poll_group_dump_stat(struct spdk_nvmf_poll_group *group, struct spdk_json_write_ctx *w)
1951{
1952 struct spdk_nvmf_transport_poll_group *tgroup;
1953
1954 spdk_json_write_object_begin(w);
1955
1956 spdk_json_write_named_string(w, "name", spdk_thread_get_name(spdk_get_thread()));
1957 spdk_json_write_named_uint32(w, "admin_qpairs", group->stat.admin_qpairs);
1958 spdk_json_write_named_uint32(w, "io_qpairs", group->stat.io_qpairs);
1959 spdk_json_write_named_uint32(w, "current_admin_qpairs", group->stat.current_admin_qpairs);
1960 spdk_json_write_named_uint32(w, "current_io_qpairs", group->stat.current_io_qpairs);
1961 spdk_json_write_named_uint64(w, "pending_bdev_io", group->stat.pending_bdev_io);
1962 spdk_json_write_named_uint64(w, "completed_nvme_io", group->stat.completed_nvme_io);
1963
1964 spdk_json_write_named_array_begin(w, "transports");
1965
1966 TAILQ_FOREACH(tgroup, &group->tgroups, link)for ((tgroup) = ((&group->tgroups)->tqh_first); (tgroup
); (tgroup) = ((tgroup)->link.tqe_next))
{
1967 spdk_json_write_object_begin(w);
1968 /*
1969 * The trtype field intentionally contains a transport name as this is more informative.
1970 * The field has not been renamed for backward compatibility.
1971 */
1972 spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(tgroup->transport));
1973
1974 if (tgroup->transport->ops->poll_group_dump_stat) {
1975 tgroup->transport->ops->poll_group_dump_stat(tgroup, w);
1976 }
1977
1978 spdk_json_write_object_end(w);
1979 }
1980
1981 spdk_json_write_array_end(w);
1982 spdk_json_write_object_end(w);
1983}