Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2018 Intel Corporation.
3 : * All rights reserved.
4 : * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "event_nvmf.h"
8 :
9 : #include "spdk/bdev.h"
10 : #include "spdk/thread.h"
11 : #include "spdk/log.h"
12 : #include "spdk/nvme.h"
13 : #include "spdk/nvmf_cmd.h"
14 : #include "spdk_internal/usdt.h"
15 :
/* States of the NVMe-oF target init/fini state machine driven by
 * nvmf_tgt_advance_state(). Values are ordered so that
 * "g_tgt_state < NVMF_TGT_RUNNING" means initialization is still in
 * progress (relied upon by nvmf_shutdown_cb()).
 */
enum nvmf_tgt_state {
	NVMF_TGT_INIT_NONE = 0,
	NVMF_TGT_INIT_CREATE_TARGET,
	NVMF_TGT_INIT_CREATE_POLL_GROUPS,
	NVMF_TGT_INIT_START_SUBSYSTEMS,
	NVMF_TGT_RUNNING,
	NVMF_TGT_FINI_STOP_LISTEN,
	NVMF_TGT_FINI_STOP_SUBSYSTEMS,
	NVMF_TGT_FINI_DESTROY_SUBSYSTEMS,
	NVMF_TGT_FINI_DESTROY_POLL_GROUPS,
	NVMF_TGT_FINI_DESTROY_TARGET,
	NVMF_TGT_STOPPED,
	NVMF_TGT_ERROR,
};
30 :
/* Bookkeeping for one poll group: the nvmf poll group itself and the
 * dedicated spdk_thread it was created on. Linked into g_poll_groups. */
struct nvmf_tgt_poll_group {
	struct spdk_nvmf_poll_group *group;	/* created on 'thread'; may be NULL on failure */
	struct spdk_thread *thread;		/* thread that owns and polls 'group' */
	TAILQ_ENTRY(nvmf_tgt_poll_group) link;	/* entry in the global g_poll_groups list */
};
36 :
/* Default DH-HMAC-CHAP digest bitmask enabled in the target options. */
#define NVMF_TGT_DEFAULT_DIGESTS (SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA256) | \
				  SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA384) | \
				  SPDK_BIT(SPDK_NVMF_DHCHAP_HASH_SHA512))

/* Default DH-HMAC-CHAP Diffie-Hellman group bitmask enabled in the target options. */
#define NVMF_TGT_DEFAULT_DHGROUPS (SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_NULL) | \
				   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_2048) | \
				   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_3072) | \
				   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_4096) | \
				   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_6144) | \
				   SPDK_BIT(SPDK_NVMF_DHCHAP_DHGROUP_8192))
47 :
/* Target configuration used when the target is created in
 * nvmf_tgt_create_target(); dumped back out by
 * nvmf_subsystem_write_config_json(). */
struct spdk_nvmf_tgt_conf g_spdk_nvmf_tgt_conf = {
	.opts = {
		.size = SPDK_SIZEOF(&g_spdk_nvmf_tgt_conf.opts, dhchap_dhgroups),
		.name = "nvmf_tgt",
		.max_subsystems = 0,
		.crdt = { 0, 0, 0 },
		.discovery_filter = SPDK_NVMF_TGT_DISCOVERY_MATCH_ANY,
		.dhchap_digests = NVMF_TGT_DEFAULT_DIGESTS,
		.dhchap_dhgroups = NVMF_TGT_DEFAULT_DHGROUPS,
	},
	.admin_passthru.identify_ctrlr = false
};
60 :
/* Optional CPU mask restricting which cores get a poll group thread (NULL = all cores). */
struct spdk_cpuset *g_poll_groups_mask = NULL;
/* The single NVMe-oF target instance owned by this module. */
struct spdk_nvmf_tgt *g_spdk_nvmf_tgt = NULL;

/* Current position in the init/fini state machine. */
static enum nvmf_tgt_state g_tgt_state;

/* Threads driving initialization and teardown; poll group create/destroy
 * completions are funneled back to these via spdk_thread_send_msg(). */
static struct spdk_thread *g_tgt_init_thread = NULL;
static struct spdk_thread *g_tgt_fini_thread = NULL;

/* All created poll groups and how many exist. */
static TAILQ_HEAD(, nvmf_tgt_poll_group) g_poll_groups = TAILQ_HEAD_INITIALIZER(g_poll_groups);
static size_t g_num_poll_groups = 0;

static void nvmf_tgt_advance_state(void);
73 :
/* Shutdown entry point (also reached via nvmf_subsystem_fini()). While
 * initialization is still in progress the call re-queues itself; once a
 * fini state has been entered, repeated signals are ignored; otherwise the
 * state machine is moved onto the teardown path.
 */
static void
nvmf_shutdown_cb(void *arg1)
{
	/* Still in initialization state, defer shutdown operation */
	if (g_tgt_state < NVMF_TGT_RUNNING) {
		spdk_thread_send_msg(spdk_get_thread(), nvmf_shutdown_cb, NULL);
		return;
	} else if (g_tgt_state != NVMF_TGT_RUNNING && g_tgt_state != NVMF_TGT_ERROR) {
		/* Already in Shutdown status, ignore the signal */
		return;
	}

	if (g_tgt_state == NVMF_TGT_ERROR) {
		/* Parse configuration error: no listeners/subsystems were started,
		 * so skip straight to destroying the target. */
		g_tgt_state = NVMF_TGT_FINI_DESTROY_TARGET;
	} else {
		g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
	}
	nvmf_tgt_advance_state();
}
94 :
/* Framework fini hook: teardown shares the same path as a shutdown signal. */
static void
nvmf_subsystem_fini(void)
{
	nvmf_shutdown_cb(NULL);
}
100 :
101 : static void
102 0 : _nvmf_tgt_destroy_poll_group_done(void *ctx)
103 : {
104 0 : assert(g_num_poll_groups > 0);
105 :
106 0 : if (--g_num_poll_groups == 0) {
107 0 : g_tgt_state = NVMF_TGT_FINI_DESTROY_TARGET;
108 0 : nvmf_tgt_advance_state();
109 0 : }
110 0 : }
111 :
/* Completion callback of spdk_nvmf_poll_group_destroy(), invoked on the poll
 * group's own thread. Frees the bookkeeping struct, notifies the fini thread
 * that one more group is gone, then exits this now-unneeded thread.
 */
static void
nvmf_tgt_destroy_poll_group_done(void *cb_arg, int status)
{
	struct nvmf_tgt_poll_group *pg = cb_arg;

	free(pg);

	spdk_thread_send_msg(g_tgt_fini_thread, _nvmf_tgt_destroy_poll_group_done, NULL);

	spdk_thread_exit(spdk_get_thread());
}
123 :
124 : static void
125 0 : nvmf_tgt_destroy_poll_group(void *ctx)
126 : {
127 0 : struct nvmf_tgt_poll_group *pg = ctx;
128 :
129 0 : spdk_nvmf_poll_group_destroy(pg->group, nvmf_tgt_destroy_poll_group_done, pg);
130 0 : }
131 :
132 : static void
133 0 : nvmf_tgt_destroy_poll_groups(void)
134 : {
135 : struct nvmf_tgt_poll_group *pg, *tpg;
136 :
137 0 : g_tgt_fini_thread = spdk_get_thread();
138 0 : assert(g_tgt_fini_thread != NULL);
139 :
140 0 : TAILQ_FOREACH_SAFE(pg, &g_poll_groups, link, tpg) {
141 0 : TAILQ_REMOVE(&g_poll_groups, pg, link);
142 0 : spdk_thread_send_msg(pg->thread, nvmf_tgt_destroy_poll_group, pg);
143 0 : }
144 0 : }
145 :
146 : static uint32_t
147 0 : nvmf_get_cpuset_count(void)
148 : {
149 0 : if (g_poll_groups_mask) {
150 0 : return spdk_cpuset_count(g_poll_groups_mask);
151 : } else {
152 0 : return spdk_env_get_core_count();
153 : }
154 0 : }
155 :
/* Runs on the init thread after each poll group creation attempt. A NULL
 * group flips the target into the error state, but the entry is still
 * inserted and counted so that every thread's completion is awaited before
 * the state machine advances (keeping teardown accounting consistent).
 */
static void
nvmf_tgt_create_poll_group_done(void *ctx)
{
	struct nvmf_tgt_poll_group *pg = ctx;

	assert(pg);

	if (!pg->group) {
		SPDK_ERRLOG("Failed to create nvmf poll group\n");
		/* Change the state to error but wait for completions from all other threads */
		g_tgt_state = NVMF_TGT_ERROR;
	}

	TAILQ_INSERT_TAIL(&g_poll_groups, pg, link);

	assert(g_num_poll_groups < nvmf_get_cpuset_count());

	if (++g_num_poll_groups == nvmf_get_cpuset_count()) {
		/* Last completion: start subsystems, or report the error. */
		if (g_tgt_state != NVMF_TGT_ERROR) {
			g_tgt_state = NVMF_TGT_INIT_START_SUBSYSTEMS;
		}
		nvmf_tgt_advance_state();
	}
}
180 :
181 : static void
182 0 : nvmf_tgt_create_poll_group(void *ctx)
183 : {
184 : struct nvmf_tgt_poll_group *pg;
185 :
186 0 : pg = calloc(1, sizeof(*pg));
187 0 : if (!pg) {
188 0 : SPDK_ERRLOG("Not enough memory to allocate poll groups\n");
189 0 : g_tgt_state = NVMF_TGT_ERROR;
190 0 : nvmf_tgt_advance_state();
191 0 : return;
192 : }
193 :
194 0 : pg->thread = spdk_get_thread();
195 0 : pg->group = spdk_nvmf_poll_group_create(g_spdk_nvmf_tgt);
196 :
197 0 : spdk_thread_send_msg(g_tgt_init_thread, nvmf_tgt_create_poll_group_done, pg);
198 0 : }
199 :
200 : static void
201 0 : nvmf_tgt_create_poll_groups(void)
202 : {
203 0 : uint32_t cpu, count = 0;
204 : char thread_name[32];
205 : struct spdk_thread *thread;
206 :
207 0 : g_tgt_init_thread = spdk_get_thread();
208 0 : assert(g_tgt_init_thread != NULL);
209 :
210 0 : SPDK_ENV_FOREACH_CORE(cpu) {
211 0 : if (g_poll_groups_mask && !spdk_cpuset_get_cpu(g_poll_groups_mask, cpu)) {
212 0 : continue;
213 : }
214 0 : snprintf(thread_name, sizeof(thread_name), "nvmf_tgt_poll_group_%03u", count++);
215 :
216 0 : thread = spdk_thread_create(thread_name, g_poll_groups_mask);
217 0 : assert(thread != NULL);
218 :
219 0 : spdk_thread_send_msg(thread, nvmf_tgt_create_poll_group, NULL);
220 0 : }
221 0 : }
222 :
223 : static void
224 0 : nvmf_tgt_subsystem_started(struct spdk_nvmf_subsystem *subsystem,
225 : void *cb_arg, int status)
226 : {
227 0 : subsystem = spdk_nvmf_subsystem_get_next(subsystem);
228 : int rc;
229 :
230 0 : if (subsystem) {
231 0 : rc = spdk_nvmf_subsystem_start(subsystem, nvmf_tgt_subsystem_started, NULL);
232 0 : if (rc) {
233 0 : g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
234 0 : SPDK_ERRLOG("Unable to start NVMe-oF subsystem. Stopping app.\n");
235 0 : nvmf_tgt_advance_state();
236 0 : }
237 0 : return;
238 : }
239 :
240 0 : g_tgt_state = NVMF_TGT_RUNNING;
241 0 : nvmf_tgt_advance_state();
242 0 : }
243 :
244 : static void
245 0 : nvmf_tgt_subsystem_stopped(struct spdk_nvmf_subsystem *subsystem,
246 : void *cb_arg, int status)
247 : {
248 0 : subsystem = spdk_nvmf_subsystem_get_next(subsystem);
249 : int rc;
250 :
251 0 : if (subsystem) {
252 0 : rc = spdk_nvmf_subsystem_stop(subsystem, nvmf_tgt_subsystem_stopped, NULL);
253 0 : if (rc) {
254 0 : SPDK_ERRLOG("Unable to stop NVMe-oF subsystem %s with rc %d, Trying others.\n",
255 : spdk_nvmf_subsystem_get_nqn(subsystem), rc);
256 0 : nvmf_tgt_subsystem_stopped(subsystem, NULL, 0);
257 0 : }
258 0 : return;
259 : }
260 :
261 0 : g_tgt_state = NVMF_TGT_FINI_DESTROY_SUBSYSTEMS;
262 0 : nvmf_tgt_advance_state();
263 0 : }
264 :
265 : static void
266 0 : nvmf_tgt_stop_listen(void)
267 : {
268 : struct spdk_nvmf_subsystem *subsystem;
269 : struct spdk_nvmf_subsystem_listener *listener;
270 : const struct spdk_nvme_transport_id *trid;
271 : struct spdk_nvmf_transport *transport;
272 : int rc;
273 :
274 0 : for (subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);
275 0 : subsystem != NULL;
276 0 : subsystem = spdk_nvmf_subsystem_get_next(subsystem)) {
277 0 : for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem);
278 0 : listener != NULL;
279 0 : listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
280 0 : trid = spdk_nvmf_subsystem_listener_get_trid(listener);
281 0 : transport = spdk_nvmf_tgt_get_transport(g_spdk_nvmf_tgt, trid->trstring);
282 0 : rc = spdk_nvmf_transport_stop_listen(transport, trid);
283 0 : if (rc != 0) {
284 0 : SPDK_ERRLOG("Unable to stop subsystem %s listener %s:%s, rc %d. Trying others.\n",
285 : spdk_nvmf_subsystem_get_nqn(subsystem), trid->traddr, trid->trsvcid, rc);
286 0 : continue;
287 : }
288 0 : }
289 0 : }
290 :
291 0 : g_tgt_state = NVMF_TGT_FINI_STOP_SUBSYSTEMS;
292 0 : }
293 :
/* Destroy all subsystems, always restarting from the head of the list.
 * spdk_nvmf_subsystem_destroy() may complete asynchronously (-EINPROGRESS),
 * in which case this same function is registered as the completion callback
 * and will resume with the remaining subsystems. Other errors are logged and
 * the walk continues with the next subsystem.
 */
static void
_nvmf_tgt_subsystem_destroy(void *cb_arg)
{
	struct spdk_nvmf_subsystem *subsystem, *next_subsystem;
	int rc;

	subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

	while (subsystem != NULL) {
		next_subsystem = spdk_nvmf_subsystem_get_next(subsystem);
		rc = spdk_nvmf_subsystem_destroy(subsystem, _nvmf_tgt_subsystem_destroy, NULL);
		if (rc) {
			if (rc == -EINPROGRESS) {
				/* If ret is -EINPROGRESS, nvmf_tgt_subsystem_destroyed will be called when subsystem
				 * is destroyed, _nvmf_tgt_subsystem_destroy will continue to destroy other subsystems if any */
				return;
			} else {
				SPDK_ERRLOG("Unable to destroy subsystem %s, rc %d. Trying others.\n",
					    spdk_nvmf_subsystem_get_nqn(subsystem), rc);
			}
		}
		subsystem = next_subsystem;
	}

	/* Every subsystem is gone (or was skipped on error): tear down poll groups next. */
	g_tgt_state = NVMF_TGT_FINI_DESTROY_POLL_GROUPS;
	nvmf_tgt_advance_state();
}
321 :
/* Completion callback for spdk_nvmf_tgt_destroy(): the target is gone, so
 * mark the module stopped and let the state machine notify the framework.
 */
static void
nvmf_tgt_destroy_done(void *ctx, int status)
{
	g_tgt_state = NVMF_TGT_STOPPED;

	nvmf_tgt_advance_state();
}
329 :
330 : static int
331 0 : nvmf_add_discovery_subsystem(void)
332 : {
333 : struct spdk_nvmf_subsystem *subsystem;
334 :
335 0 : subsystem = spdk_nvmf_subsystem_create(g_spdk_nvmf_tgt, SPDK_NVMF_DISCOVERY_NQN,
336 : SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT, 0);
337 0 : if (subsystem == NULL) {
338 0 : SPDK_ERRLOG("Failed creating discovery nvmf library subsystem\n");
339 0 : return -1;
340 : }
341 :
342 0 : spdk_nvmf_subsystem_set_allow_any_host(subsystem, true);
343 :
344 0 : return 0;
345 0 : }
346 :
347 : static int
348 0 : nvmf_tgt_create_target(void)
349 : {
350 0 : g_spdk_nvmf_tgt = spdk_nvmf_tgt_create(&g_spdk_nvmf_tgt_conf.opts);
351 0 : if (!g_spdk_nvmf_tgt) {
352 0 : SPDK_ERRLOG("spdk_nvmf_tgt_create() failed\n");
353 0 : return -1;
354 : }
355 :
356 0 : if (nvmf_add_discovery_subsystem() != 0) {
357 0 : SPDK_ERRLOG("nvmf_add_discovery_subsystem failed\n");
358 0 : return -1;
359 : }
360 :
361 0 : return 0;
362 0 : }
363 :
/* Completion callback for the passthru IDENTIFY CONTROLLER command: merge
 * selected fields from the backing NVMe drive's identify data into the data
 * the NVMe-oF controller would report, so the initiator sees the drive's
 * SN/MN/FR and IEEE OUI instead of the generic SPDK values.
 */
static void
fixup_identify_ctrlr(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_ctrlr_data nvme_cdata = {};
	struct spdk_nvme_ctrlr_data nvmf_cdata = {};
	struct spdk_nvmf_ctrlr *ctrlr = spdk_nvmf_request_get_ctrlr(req);
	struct spdk_nvme_cpl *rsp = spdk_nvmf_request_get_response(req);
	size_t datalen;
	int rc;

	/* This is the identify data from the NVMe drive */
	datalen = spdk_nvmf_request_copy_to_buf(req, &nvme_cdata,
						sizeof(nvme_cdata));

	/* Get the NVMF identify data */
	rc = spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, &nvmf_cdata);
	if (rc != SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		/* Couldn't build the NVMF view; fail the command. */
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return;
	}

	/* Fixup NVMF identify data with NVMe identify data */

	/* Serial Number (SN) */
	memcpy(&nvmf_cdata.sn[0], &nvme_cdata.sn[0], sizeof(nvmf_cdata.sn));
	/* Model Number (MN) */
	memcpy(&nvmf_cdata.mn[0], &nvme_cdata.mn[0], sizeof(nvmf_cdata.mn));
	/* Firmware Revision (FR) */
	memcpy(&nvmf_cdata.fr[0], &nvme_cdata.fr[0], sizeof(nvmf_cdata.fr));
	/* IEEE OUI Identifier (IEEE) */
	memcpy(&nvmf_cdata.ieee[0], &nvme_cdata.ieee[0], sizeof(nvmf_cdata.ieee));
	/* FRU Globally Unique Identifier (FGUID) */

	/* Copy the fixed up data back to the response */
	spdk_nvmf_request_copy_from_buf(req, &nvmf_cdata, datalen);
}
401 :
/* Custom admin-command handler installed for IDENTIFY when admin passthru is
 * enabled. For IDENTIFY CONTROLLER on a single-namespace subsystem whose bdev
 * supports NVMe admin passthru, forward the command to the backing drive and
 * fix up the result via fixup_identify_ctrlr(). A negative return means
 * "continue with default SPDK handling".
 */
static int
nvmf_custom_identify_hdlr(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_subsystem *subsys;
	int rc;

	if (cmd->cdw10_bits.identify.cns != SPDK_NVME_IDENTIFY_CTRLR) {
		return -1; /* continue */
	}

	subsys = spdk_nvmf_request_get_subsystem(req);
	if (subsys == NULL) {
		return -1;
	}

	/* Only process this request if it has exactly one namespace */
	if (spdk_nvmf_subsystem_get_max_nsid(subsys) != 1) {
		return -1;
	}

	/* Forward to first namespace if it supports NVME admin commands */
	rc = spdk_nvmf_request_get_bdev(1, req, &bdev, &desc, &ch);
	if (rc) {
		/* No bdev found for this namespace. Continue. */
		return -1;
	}

	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return -1;
	}

	return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, fixup_identify_ctrlr);
}
439 :
/* Drive the target's init/fini state machine. The loop keeps running as long
 * as the handler for the current state changes g_tgt_state synchronously;
 * asynchronous steps either leave the state unchanged (their completion
 * callbacks re-enter this function) or return from the switch directly.
 */
static void
nvmf_tgt_advance_state(void)
{
	enum nvmf_tgt_state prev_state;
	int rc = -1;	/* error code reported to the framework in the ERROR state */
	int ret;

	do {
		SPDK_DTRACE_PROBE1(nvmf_tgt_state, g_tgt_state);
		prev_state = g_tgt_state;

		switch (g_tgt_state) {
		case NVMF_TGT_INIT_NONE: {
			g_tgt_state = NVMF_TGT_INIT_CREATE_TARGET;
			break;
		}
		case NVMF_TGT_INIT_CREATE_TARGET:
			ret = nvmf_tgt_create_target();
			g_tgt_state = (ret == 0) ? NVMF_TGT_INIT_CREATE_POLL_GROUPS : NVMF_TGT_ERROR;
			break;
		case NVMF_TGT_INIT_CREATE_POLL_GROUPS:
			if (g_spdk_nvmf_tgt_conf.admin_passthru.identify_ctrlr) {
				SPDK_NOTICELOG("Custom identify ctrlr handler enabled\n");
				spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_IDENTIFY, nvmf_custom_identify_hdlr);
			}
			/* Create poll group threads, and send a message to each thread
			 * and create a poll group.
			 */
			nvmf_tgt_create_poll_groups();
			break;
		case NVMF_TGT_INIT_START_SUBSYSTEMS: {
			struct spdk_nvmf_subsystem *subsystem;

			/* Start the first subsystem; the rest are chained through
			 * nvmf_tgt_subsystem_started(). */
			subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

			if (subsystem) {
				ret = spdk_nvmf_subsystem_start(subsystem, nvmf_tgt_subsystem_started, NULL);
				if (ret) {
					SPDK_ERRLOG("Unable to start NVMe-oF subsystem. Stopping app.\n");
					g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
				}
			} else {
				g_tgt_state = NVMF_TGT_RUNNING;
			}
			break;
		}
		case NVMF_TGT_RUNNING:
			spdk_subsystem_init_next(0);
			break;
		case NVMF_TGT_FINI_STOP_LISTEN:
			nvmf_tgt_stop_listen();
			break;
		case NVMF_TGT_FINI_STOP_SUBSYSTEMS: {
			struct spdk_nvmf_subsystem *subsystem;

			/* Stop the first subsystem; the rest are chained through
			 * nvmf_tgt_subsystem_stopped(). */
			subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

			if (subsystem) {
				ret = spdk_nvmf_subsystem_stop(subsystem, nvmf_tgt_subsystem_stopped, NULL);
				if (ret) {
					nvmf_tgt_subsystem_stopped(subsystem, NULL, 0);
				}
			} else {
				g_tgt_state = NVMF_TGT_FINI_DESTROY_SUBSYSTEMS;
			}
			break;
		}
		case NVMF_TGT_FINI_DESTROY_SUBSYSTEMS:
			_nvmf_tgt_subsystem_destroy(NULL);
			/* Function above can be asynchronous, it will call nvmf_tgt_advance_state() once done.
			 * So just return here */
			return;
		case NVMF_TGT_FINI_DESTROY_POLL_GROUPS:
			/* Send a message to each poll group thread, and terminate the thread */
			nvmf_tgt_destroy_poll_groups();
			break;
		case NVMF_TGT_FINI_DESTROY_TARGET:
			spdk_nvmf_tgt_destroy(g_spdk_nvmf_tgt, nvmf_tgt_destroy_done, NULL);
			break;
		case NVMF_TGT_STOPPED:
			spdk_subsystem_fini_next();
			return;
		case NVMF_TGT_ERROR:
			spdk_subsystem_init_next(rc);
			return;
		}

	} while (g_tgt_state != prev_state);
}
529 :
/* Framework init hook: run the state machine from the beginning. Completion
 * is signaled via spdk_subsystem_init_next() once RUNNING (or ERROR) is
 * reached.
 */
static void
nvmf_subsystem_init(void)
{
	g_tgt_state = NVMF_TGT_INIT_NONE;
	nvmf_tgt_advance_state();
}
536 :
/* Write the "discovery_filter" key for nvmf_set_config. The filter is a
 * bitmask of transport type/address/svcid match flags, and the answers table
 * is indexed directly by that 3-bit mask, so the table order must track the
 * flag bit positions.
 */
static void
nvmf_subsystem_dump_discover_filter(struct spdk_json_write_ctx *w)
{
	static char const *const answers[] = {
		"match_any",
		"transport",
		"address",
		"transport,address",
		"svcid",
		"transport,svcid",
		"address,svcid",
		"transport,address,svcid"
	};

	/* Any bit outside the three known flags means the filter is corrupt. */
	if ((g_spdk_nvmf_tgt_conf.opts.discovery_filter & ~(SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_TYPE |
			SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_ADDRESS |
			SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_SVCID)) != 0) {
		SPDK_ERRLOG("Incorrect discovery filter %d\n", g_spdk_nvmf_tgt_conf.opts.discovery_filter);
		assert(0);
		return;
	}

	spdk_json_write_named_string(w, "discovery_filter",
				     answers[g_spdk_nvmf_tgt_conf.opts.discovery_filter]);
}
562 :
/* Emit this module's configuration as a JSON-RPC array: one nvmf_set_config
 * entry reflecting the current g_spdk_nvmf_tgt_conf, followed by the nvmf
 * library's own config (transports, subsystems, ...).
 */
static void
nvmf_subsystem_write_config_json(struct spdk_json_write_ctx *w)
{
	int i;

	spdk_json_write_array_begin(w);

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_set_config");

	spdk_json_write_named_object_begin(w, "params");
	nvmf_subsystem_dump_discover_filter(w);
	spdk_json_write_named_object_begin(w, "admin_cmd_passthru");
	spdk_json_write_named_bool(w, "identify_ctrlr",
				   g_spdk_nvmf_tgt_conf.admin_passthru.identify_ctrlr);
	spdk_json_write_object_end(w);
	if (g_poll_groups_mask) {
		/* Only emitted when the user restricted the poll group cores. */
		spdk_json_write_named_string(w, "poll_groups_mask", spdk_cpuset_fmt(g_poll_groups_mask));
	}
	/* Dump every enabled DH-HMAC-CHAP digest bit by name. */
	spdk_json_write_named_array_begin(w, "dhchap_digests");
	for (i = 0; i < 32; ++i) {
		if (g_spdk_nvmf_tgt_conf.opts.dhchap_digests & SPDK_BIT(i)) {
			spdk_json_write_string(w, spdk_nvme_dhchap_get_digest_name(i));
		}
	}
	spdk_json_write_array_end(w);
	/* Likewise for the enabled Diffie-Hellman groups. */
	spdk_json_write_named_array_begin(w, "dhchap_dhgroups");
	for (i = 0; i < 32; ++i) {
		if (g_spdk_nvmf_tgt_conf.opts.dhchap_dhgroups & SPDK_BIT(i)) {
			spdk_json_write_string(w, spdk_nvme_dhchap_get_dhgroup_name(i));
		}
	}
	spdk_json_write_array_end(w);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);

	spdk_nvmf_tgt_write_config_json(w, g_spdk_nvmf_tgt);
	spdk_json_write_array_end(w);
}
602 :
/* Register the "nvmf" subsystem with the event framework and declare the
 * subsystems that must be initialized before it (bdev, keyring, sock). */
static struct spdk_subsystem g_spdk_subsystem_nvmf = {
	.name = "nvmf",
	.init = nvmf_subsystem_init,
	.fini = nvmf_subsystem_fini,
	.write_config_json = nvmf_subsystem_write_config_json,
};

SPDK_SUBSYSTEM_REGISTER(g_spdk_subsystem_nvmf)
SPDK_SUBSYSTEM_DEPEND(nvmf, bdev)
SPDK_SUBSYSTEM_DEPEND(nvmf, keyring)
SPDK_SUBSYSTEM_DEPEND(nvmf, sock)
|