Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2018 Intel Corporation.
3 : * All rights reserved.
4 : * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "event_nvmf.h"
8 :
9 : #include "spdk/bdev.h"
10 : #include "spdk/thread.h"
11 : #include "spdk/log.h"
12 : #include "spdk/nvme.h"
13 : #include "spdk/nvmf_cmd.h"
14 : #include "spdk_internal/usdt.h"
15 :
/* Lifecycle states of the NVMe-oF target application subsystem.
 * Transitions are driven exclusively by nvmf_tgt_advance_state(). */
enum nvmf_tgt_state {
	NVMF_TGT_INIT_NONE = 0,
	NVMF_TGT_INIT_CREATE_TARGET,
	NVMF_TGT_INIT_CREATE_POLL_GROUPS,
	NVMF_TGT_INIT_START_SUBSYSTEMS,
	NVMF_TGT_RUNNING,
	NVMF_TGT_FINI_STOP_LISTEN,
	NVMF_TGT_FINI_STOP_SUBSYSTEMS,
	NVMF_TGT_FINI_DESTROY_SUBSYSTEMS,
	NVMF_TGT_FINI_DESTROY_POLL_GROUPS,
	NVMF_TGT_FINI_DESTROY_TARGET,
	NVMF_TGT_STOPPED,
	NVMF_TGT_ERROR,
};
30 :
/* Bookkeeping for one poll group: the group itself and the thread it
 * runs on.  Entries are linked into the file-scope g_poll_groups list. */
struct nvmf_tgt_poll_group {
	struct spdk_nvmf_poll_group *group;
	struct spdk_thread *thread;
	TAILQ_ENTRY(nvmf_tgt_poll_group) link;
};
36 :
/* Global target configuration; consumed by nvmf_tgt_create_target() and
 * overridable via the nvmf_set_config RPC (see write_config_json below). */
struct spdk_nvmf_tgt_conf g_spdk_nvmf_tgt_conf = {
	.opts = {
		/* Opts struct size up to and including dhchap_dhgroups, for
		 * ABI-versioned option handling in spdk_nvmf_tgt_create(). */
		.size = SPDK_SIZEOF(&g_spdk_nvmf_tgt_conf.opts, dhchap_dhgroups),
		.name = "nvmf_tgt",
		.max_subsystems = 0,
		.crdt = { 0, 0, 0 },
		.discovery_filter = SPDK_NVMF_TGT_DISCOVERY_MATCH_ANY,
		/* All bits set — presumably enables every DH-HMAC-CHAP
		 * digest/dhgroup by default; confirm against spdk_nvmf_tgt_opts. */
		.dhchap_digests = UINT32_MAX,
		.dhchap_dhgroups = UINT32_MAX,
	},
	.admin_passthru.identify_ctrlr = false
};
49 :
/* Optional CPU mask restricting which cores host poll groups (NULL = all cores). */
struct spdk_cpuset *g_poll_groups_mask = NULL;
/* The single NVMe-oF target instance owned by this subsystem. */
struct spdk_nvmf_tgt *g_spdk_nvmf_tgt = NULL;

/* Current position in the init/fini state machine. */
static enum nvmf_tgt_state g_tgt_state;

/* Threads driving init and fini; per-core completions are funneled back
 * to these via spdk_thread_send_msg(). */
static struct spdk_thread *g_tgt_init_thread = NULL;
static struct spdk_thread *g_tgt_fini_thread = NULL;

/* All created poll groups and their count; accessed from the init/fini
 * threads only (completions are serialized onto them). */
static TAILQ_HEAD(, nvmf_tgt_poll_group) g_poll_groups = TAILQ_HEAD_INITIALIZER(g_poll_groups);
static size_t g_num_poll_groups = 0;

static void nvmf_tgt_advance_state(void);
62 :
63 : static void
64 0 : nvmf_shutdown_cb(void *arg1)
65 : {
66 : /* Still in initialization state, defer shutdown operation */
67 0 : if (g_tgt_state < NVMF_TGT_RUNNING) {
68 0 : spdk_thread_send_msg(spdk_get_thread(), nvmf_shutdown_cb, NULL);
69 0 : return;
70 0 : } else if (g_tgt_state != NVMF_TGT_RUNNING && g_tgt_state != NVMF_TGT_ERROR) {
71 : /* Already in Shutdown status, ignore the signal */
72 0 : return;
73 : }
74 :
75 0 : if (g_tgt_state == NVMF_TGT_ERROR) {
76 : /* Parse configuration error */
77 0 : g_tgt_state = NVMF_TGT_FINI_DESTROY_TARGET;
78 : } else {
79 0 : g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
80 : }
81 0 : nvmf_tgt_advance_state();
82 : }
83 :
/* Framework fini hook: teardown reuses the shutdown path. */
static void
nvmf_subsystem_fini(void)
{
	nvmf_shutdown_cb(NULL);
}
89 :
90 : static void
91 0 : _nvmf_tgt_destroy_poll_group_done(void *ctx)
92 : {
93 0 : assert(g_num_poll_groups > 0);
94 :
95 0 : if (--g_num_poll_groups == 0) {
96 0 : g_tgt_state = NVMF_TGT_FINI_DESTROY_TARGET;
97 0 : nvmf_tgt_advance_state();
98 : }
99 0 : }
100 :
/*
 * Completion callback of spdk_nvmf_poll_group_destroy(), running on the
 * poll group's own thread.  Frees the bookkeeping entry, reports the
 * completion back to the fini thread, and then exits this thread.
 */
static void
nvmf_tgt_destroy_poll_group_done(void *cb_arg, int status)
{
	struct nvmf_tgt_poll_group *pg = cb_arg;

	free(pg);

	/* Completion counting happens on the single fini thread. */
	spdk_thread_send_msg(g_tgt_fini_thread, _nvmf_tgt_destroy_poll_group_done, NULL);

	/* Must be last: this thread is finished once its group is gone. */
	spdk_thread_exit(spdk_get_thread());
}
112 :
113 : static void
114 0 : nvmf_tgt_destroy_poll_group(void *ctx)
115 : {
116 0 : struct nvmf_tgt_poll_group *pg = ctx;
117 :
118 0 : spdk_nvmf_poll_group_destroy(pg->group, nvmf_tgt_destroy_poll_group_done, pg);
119 0 : }
120 :
121 : static void
122 0 : nvmf_tgt_destroy_poll_groups(void)
123 : {
124 : struct nvmf_tgt_poll_group *pg, *tpg;
125 :
126 0 : g_tgt_fini_thread = spdk_get_thread();
127 0 : assert(g_tgt_fini_thread != NULL);
128 :
129 0 : TAILQ_FOREACH_SAFE(pg, &g_poll_groups, link, tpg) {
130 0 : TAILQ_REMOVE(&g_poll_groups, pg, link);
131 0 : spdk_thread_send_msg(pg->thread, nvmf_tgt_destroy_poll_group, pg);
132 : }
133 0 : }
134 :
135 : static uint32_t
136 0 : nvmf_get_cpuset_count(void)
137 : {
138 0 : if (g_poll_groups_mask) {
139 0 : return spdk_cpuset_count(g_poll_groups_mask);
140 : } else {
141 0 : return spdk_env_get_core_count();
142 : }
143 : }
144 :
145 : static void
146 0 : nvmf_tgt_create_poll_group_done(void *ctx)
147 : {
148 0 : struct nvmf_tgt_poll_group *pg = ctx;
149 :
150 0 : assert(pg);
151 :
152 0 : if (!pg->group) {
153 0 : SPDK_ERRLOG("Failed to create nvmf poll group\n");
154 : /* Change the state to error but wait for completions from all other threads */
155 0 : g_tgt_state = NVMF_TGT_ERROR;
156 : }
157 :
158 0 : TAILQ_INSERT_TAIL(&g_poll_groups, pg, link);
159 :
160 0 : assert(g_num_poll_groups < nvmf_get_cpuset_count());
161 :
162 0 : if (++g_num_poll_groups == nvmf_get_cpuset_count()) {
163 0 : if (g_tgt_state != NVMF_TGT_ERROR) {
164 0 : g_tgt_state = NVMF_TGT_INIT_START_SUBSYSTEMS;
165 : }
166 0 : nvmf_tgt_advance_state();
167 : }
168 0 : }
169 :
170 : static void
171 0 : nvmf_tgt_create_poll_group(void *ctx)
172 : {
173 : struct nvmf_tgt_poll_group *pg;
174 :
175 0 : pg = calloc(1, sizeof(*pg));
176 0 : if (!pg) {
177 0 : SPDK_ERRLOG("Not enough memory to allocate poll groups\n");
178 0 : g_tgt_state = NVMF_TGT_ERROR;
179 0 : nvmf_tgt_advance_state();
180 0 : return;
181 : }
182 :
183 0 : pg->thread = spdk_get_thread();
184 0 : pg->group = spdk_nvmf_poll_group_create(g_spdk_nvmf_tgt);
185 :
186 0 : spdk_thread_send_msg(g_tgt_init_thread, nvmf_tgt_create_poll_group_done, pg);
187 : }
188 :
/*
 * Spawn one thread per selected core and ask each to create a poll
 * group.  Progress is tracked by nvmf_tgt_create_poll_group_done(),
 * which advances the state machine once all threads have reported.
 */
static void
nvmf_tgt_create_poll_groups(void)
{
	uint32_t cpu, count = 0;
	char thread_name[32];
	struct spdk_thread *thread;

	g_tgt_init_thread = spdk_get_thread();
	assert(g_tgt_init_thread != NULL);

	SPDK_ENV_FOREACH_CORE(cpu) {
		/* Skip cores excluded by the optional poll_groups_mask. */
		if (g_poll_groups_mask && !spdk_cpuset_get_cpu(g_poll_groups_mask, cpu)) {
			continue;
		}
		snprintf(thread_name, sizeof(thread_name), "nvmf_tgt_poll_group_%03u", count++);

		thread = spdk_thread_create(thread_name, g_poll_groups_mask);
		/* NOTE(review): failure is only caught by this assert; in a
		 * release build a NULL thread would be passed to send_msg —
		 * confirm spdk_thread_create cannot fail in this context. */
		assert(thread != NULL);

		spdk_thread_send_msg(thread, nvmf_tgt_create_poll_group, NULL);
	}
}
211 :
212 : static void
213 0 : nvmf_tgt_subsystem_started(struct spdk_nvmf_subsystem *subsystem,
214 : void *cb_arg, int status)
215 : {
216 0 : subsystem = spdk_nvmf_subsystem_get_next(subsystem);
217 : int rc;
218 :
219 0 : if (subsystem) {
220 0 : rc = spdk_nvmf_subsystem_start(subsystem, nvmf_tgt_subsystem_started, NULL);
221 0 : if (rc) {
222 0 : g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
223 0 : SPDK_ERRLOG("Unable to start NVMe-oF subsystem. Stopping app.\n");
224 0 : nvmf_tgt_advance_state();
225 : }
226 0 : return;
227 : }
228 :
229 0 : g_tgt_state = NVMF_TGT_RUNNING;
230 0 : nvmf_tgt_advance_state();
231 : }
232 :
233 : static void
234 0 : nvmf_tgt_subsystem_stopped(struct spdk_nvmf_subsystem *subsystem,
235 : void *cb_arg, int status)
236 : {
237 0 : subsystem = spdk_nvmf_subsystem_get_next(subsystem);
238 : int rc;
239 :
240 0 : if (subsystem) {
241 0 : rc = spdk_nvmf_subsystem_stop(subsystem, nvmf_tgt_subsystem_stopped, NULL);
242 0 : if (rc) {
243 0 : SPDK_ERRLOG("Unable to stop NVMe-oF subsystem %s with rc %d, Trying others.\n",
244 : spdk_nvmf_subsystem_get_nqn(subsystem), rc);
245 0 : nvmf_tgt_subsystem_stopped(subsystem, NULL, 0);
246 : }
247 0 : return;
248 : }
249 :
250 0 : g_tgt_state = NVMF_TGT_FINI_DESTROY_SUBSYSTEMS;
251 0 : nvmf_tgt_advance_state();
252 : }
253 :
254 : static void
255 0 : nvmf_tgt_stop_listen(void)
256 : {
257 : struct spdk_nvmf_subsystem *subsystem;
258 : struct spdk_nvmf_subsystem_listener *listener;
259 : const struct spdk_nvme_transport_id *trid;
260 : struct spdk_nvmf_transport *transport;
261 : int rc;
262 :
263 0 : for (subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);
264 0 : subsystem != NULL;
265 0 : subsystem = spdk_nvmf_subsystem_get_next(subsystem)) {
266 0 : for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem);
267 0 : listener != NULL;
268 0 : listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) {
269 0 : trid = spdk_nvmf_subsystem_listener_get_trid(listener);
270 0 : transport = spdk_nvmf_tgt_get_transport(g_spdk_nvmf_tgt, trid->trstring);
271 0 : rc = spdk_nvmf_transport_stop_listen(transport, trid);
272 0 : if (rc != 0) {
273 0 : SPDK_ERRLOG("Unable to stop subsystem %s listener %s:%s, rc %d. Trying others.\n",
274 : spdk_nvmf_subsystem_get_nqn(subsystem), trid->traddr, trid->trsvcid, rc);
275 0 : continue;
276 : }
277 : }
278 : }
279 :
280 0 : g_tgt_state = NVMF_TGT_FINI_STOP_SUBSYSTEMS;
281 0 : }
282 :
/*
 * Destroy all subsystems.  spdk_nvmf_subsystem_destroy() may complete
 * asynchronously (-EINPROGRESS); in that case this function is re-entered
 * as the completion callback and resumes from the head of the remaining
 * list.  Only when the list is empty does the state machine advance.
 */
static void
_nvmf_tgt_subsystem_destroy(void *cb_arg)
{
	struct spdk_nvmf_subsystem *subsystem, *next_subsystem;
	int rc;

	subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

	while (subsystem != NULL) {
		/* Fetch the successor before destroy invalidates this node. */
		next_subsystem = spdk_nvmf_subsystem_get_next(subsystem);
		rc = spdk_nvmf_subsystem_destroy(subsystem, _nvmf_tgt_subsystem_destroy, NULL);
		if (rc) {
			if (rc == -EINPROGRESS) {
				/* If ret is -EINPROGRESS, nvmf_tgt_subsystem_destroyed will be called when subsystem
				 * is destroyed, _nvmf_tgt_subsystem_destroy will continue to destroy other subsystems if any */
				return;
			} else {
				SPDK_ERRLOG("Unable to destroy subsystem %s, rc %d. Trying others.\n",
					    spdk_nvmf_subsystem_get_nqn(subsystem), rc);
			}
		}
		subsystem = next_subsystem;
	}

	g_tgt_state = NVMF_TGT_FINI_DESTROY_POLL_GROUPS;
	nvmf_tgt_advance_state();
}
310 :
/* Completion callback of spdk_nvmf_tgt_destroy(): final state transition. */
static void
nvmf_tgt_destroy_done(void *ctx, int status)
{
	g_tgt_state = NVMF_TGT_STOPPED;

	nvmf_tgt_advance_state();
}
318 :
319 : static int
320 0 : nvmf_add_discovery_subsystem(void)
321 : {
322 : struct spdk_nvmf_subsystem *subsystem;
323 :
324 0 : subsystem = spdk_nvmf_subsystem_create(g_spdk_nvmf_tgt, SPDK_NVMF_DISCOVERY_NQN,
325 : SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT, 0);
326 0 : if (subsystem == NULL) {
327 0 : SPDK_ERRLOG("Failed creating discovery nvmf library subsystem\n");
328 0 : return -1;
329 : }
330 :
331 0 : spdk_nvmf_subsystem_set_allow_any_host(subsystem, true);
332 :
333 0 : return 0;
334 : }
335 :
336 : static int
337 0 : nvmf_tgt_create_target(void)
338 : {
339 0 : g_spdk_nvmf_tgt = spdk_nvmf_tgt_create(&g_spdk_nvmf_tgt_conf.opts);
340 0 : if (!g_spdk_nvmf_tgt) {
341 0 : SPDK_ERRLOG("spdk_nvmf_tgt_create() failed\n");
342 0 : return -1;
343 : }
344 :
345 0 : if (nvmf_add_discovery_subsystem() != 0) {
346 0 : SPDK_ERRLOG("nvmf_add_discovery_subsystem failed\n");
347 0 : return -1;
348 : }
349 :
350 0 : return 0;
351 : }
352 :
/*
 * Passthru completion hook for IDENTIFY CONTROLLER: merge selected
 * fields of the backing NVMe drive's identify data into the NVMe-oF
 * controller's identify data and write the result back to the request
 * buffer.  On internal failure, sets an error status on the response.
 */
static void
fixup_identify_ctrlr(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_ctrlr_data nvme_cdata = {};
	struct spdk_nvme_ctrlr_data nvmf_cdata = {};
	struct spdk_nvmf_ctrlr *ctrlr = spdk_nvmf_request_get_ctrlr(req);
	struct spdk_nvme_cpl *rsp = spdk_nvmf_request_get_response(req);
	size_t datalen;
	int rc;

	/* This is the identify data from the NVMe drive */
	datalen = spdk_nvmf_request_copy_to_buf(req, &nvme_cdata,
						sizeof(nvme_cdata));

	/* Get the NVMF identify data */
	rc = spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, &nvmf_cdata);
	if (rc != SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return;
	}

	/* Fixup NVMF identify data with NVMe identify data */

	/* Serial Number (SN) */
	memcpy(&nvmf_cdata.sn[0], &nvme_cdata.sn[0], sizeof(nvmf_cdata.sn));
	/* Model Number (MN) */
	memcpy(&nvmf_cdata.mn[0], &nvme_cdata.mn[0], sizeof(nvmf_cdata.mn));
	/* Firmware Revision (FR) */
	memcpy(&nvmf_cdata.fr[0], &nvme_cdata.fr[0], sizeof(nvmf_cdata.fr));
	/* IEEE OUI Identifier (IEEE) */
	memcpy(&nvmf_cdata.ieee[0], &nvme_cdata.ieee[0], sizeof(nvmf_cdata.ieee));
	/* FRU Globally Unique Identifier (FGUID) */
	/* NOTE(review): FGUID is mentioned but not copied — looks deliberate;
	 * confirm it should remain the NVMe-oF controller's value. */

	/* Copy the fixed up data back to the response */
	spdk_nvmf_request_copy_from_buf(req, &nvmf_cdata, datalen);
}
390 :
/*
 * Custom admin-command handler for IDENTIFY.  Forwards IDENTIFY CONTROLLER
 * to the backing bdev of single-namespace subsystems so the host sees the
 * drive's SN/MN/FR (see fixup_identify_ctrlr).  Returns -1 to fall back to
 * the default handler in every other case.
 */
static int
nvmf_custom_identify_hdlr(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_subsystem *subsys;
	int rc;

	if (cmd->cdw10_bits.identify.cns != SPDK_NVME_IDENTIFY_CTRLR) {
		return -1; /* continue */
	}

	subsys = spdk_nvmf_request_get_subsystem(req);
	if (subsys == NULL) {
		return -1;
	}

	/* Only process this request if it has exactly one namespace */
	if (spdk_nvmf_subsystem_get_max_nsid(subsys) != 1) {
		return -1;
	}

	/* Forward to first namespace if it supports NVMe admin commands */
	rc = spdk_nvmf_request_get_bdev(1, req, &bdev, &desc, &ch);
	if (rc) {
		/* No bdev found for this namespace. Continue. */
		return -1;
	}

	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
		return -1;
	}

	return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, fixup_identify_ctrlr);
}
428 :
/*
 * Drive the target state machine.  Loops while synchronous transitions
 * keep changing g_tgt_state; asynchronous steps (poll group creation and
 * destruction, subsystem start/stop/destroy, target destroy) leave the
 * state unchanged here and re-enter via their completion callbacks.
 */
static void
nvmf_tgt_advance_state(void)
{
	enum nvmf_tgt_state prev_state;
	int rc = -1;
	int ret;

	do {
		SPDK_DTRACE_PROBE1(nvmf_tgt_state, g_tgt_state);
		prev_state = g_tgt_state;

		switch (g_tgt_state) {
		case NVMF_TGT_INIT_NONE: {
			g_tgt_state = NVMF_TGT_INIT_CREATE_TARGET;
			break;
		}
		case NVMF_TGT_INIT_CREATE_TARGET:
			ret = nvmf_tgt_create_target();
			g_tgt_state = (ret == 0) ? NVMF_TGT_INIT_CREATE_POLL_GROUPS : NVMF_TGT_ERROR;
			break;
		case NVMF_TGT_INIT_CREATE_POLL_GROUPS:
			/* Install the custom IDENTIFY handler before I/O can arrive. */
			if (g_spdk_nvmf_tgt_conf.admin_passthru.identify_ctrlr) {
				SPDK_NOTICELOG("Custom identify ctrlr handler enabled\n");
				spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_IDENTIFY, nvmf_custom_identify_hdlr);
			}
			/* Create poll group threads, and send a message to each thread
			 * and create a poll group.  Asynchronous: the done callback
			 * advances the state.
			 */
			nvmf_tgt_create_poll_groups();
			break;
		case NVMF_TGT_INIT_START_SUBSYSTEMS: {
			struct spdk_nvmf_subsystem *subsystem;

			subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

			if (subsystem) {
				/* Subsystems are started one at a time; the callback
				 * chains to the next one. */
				ret = spdk_nvmf_subsystem_start(subsystem, nvmf_tgt_subsystem_started, NULL);
				if (ret) {
					SPDK_ERRLOG("Unable to start NVMe-oF subsystem. Stopping app.\n");
					g_tgt_state = NVMF_TGT_FINI_STOP_LISTEN;
				}
			} else {
				g_tgt_state = NVMF_TGT_RUNNING;
			}
			break;
		}
		case NVMF_TGT_RUNNING:
			/* Initialization complete; hand control back to the framework. */
			spdk_subsystem_init_next(0);
			break;
		case NVMF_TGT_FINI_STOP_LISTEN:
			nvmf_tgt_stop_listen();
			break;
		case NVMF_TGT_FINI_STOP_SUBSYSTEMS: {
			struct spdk_nvmf_subsystem *subsystem;

			subsystem = spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt);

			if (subsystem) {
				ret = spdk_nvmf_subsystem_stop(subsystem, nvmf_tgt_subsystem_stopped, NULL);
				if (ret) {
					/* Skip the failed subsystem; the callback continues
					 * with the rest of the list. */
					nvmf_tgt_subsystem_stopped(subsystem, NULL, 0);
				}
			} else {
				g_tgt_state = NVMF_TGT_FINI_DESTROY_SUBSYSTEMS;
			}
			break;
		}
		case NVMF_TGT_FINI_DESTROY_SUBSYSTEMS:
			_nvmf_tgt_subsystem_destroy(NULL);
			/* Function above can be asynchronous, it will call nvmf_tgt_advance_state() once done.
			 * So just return here */
			return;
		case NVMF_TGT_FINI_DESTROY_POLL_GROUPS:
			/* Send a message to each poll group thread, and terminate the thread */
			nvmf_tgt_destroy_poll_groups();
			break;
		case NVMF_TGT_FINI_DESTROY_TARGET:
			spdk_nvmf_tgt_destroy(g_spdk_nvmf_tgt, nvmf_tgt_destroy_done, NULL);
			break;
		case NVMF_TGT_STOPPED:
			/* Teardown complete; notify the framework. */
			spdk_subsystem_fini_next();
			return;
		case NVMF_TGT_ERROR:
			/* Report failed initialization (rc is -1). */
			spdk_subsystem_init_next(rc);
			return;
		}

	} while (g_tgt_state != prev_state);
}
518 :
/* Framework init hook: reset the state machine and start it. */
static void
nvmf_subsystem_init(void)
{
	g_tgt_state = NVMF_TGT_INIT_NONE;
	nvmf_tgt_advance_state();
}
525 :
526 : static void
527 0 : nvmf_subsystem_dump_discover_filter(struct spdk_json_write_ctx *w)
528 : {
529 : static char const *const answers[] = {
530 : "match_any",
531 : "transport",
532 : "address",
533 : "transport,address",
534 : "svcid",
535 : "transport,svcid",
536 : "address,svcid",
537 : "transport,address,svcid"
538 : };
539 :
540 0 : if ((g_spdk_nvmf_tgt_conf.opts.discovery_filter & ~(SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_TYPE |
541 : SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_ADDRESS |
542 : SPDK_NVMF_TGT_DISCOVERY_MATCH_TRANSPORT_SVCID)) != 0) {
543 0 : SPDK_ERRLOG("Incorrect discovery filter %d\n", g_spdk_nvmf_tgt_conf.opts.discovery_filter);
544 0 : assert(0);
545 : return;
546 : }
547 :
548 0 : spdk_json_write_named_string(w, "discovery_filter",
549 0 : answers[g_spdk_nvmf_tgt_conf.opts.discovery_filter]);
550 : }
551 :
/*
 * Framework config-dump hook: serialize this subsystem's configuration
 * as a JSON array of RPC calls (nvmf_set_config plus whatever the
 * target library itself emits) that would recreate the current state.
 */
static void
nvmf_subsystem_write_config_json(struct spdk_json_write_ctx *w)
{
	spdk_json_write_array_begin(w);

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "nvmf_set_config");

	spdk_json_write_named_object_begin(w, "params");
	nvmf_subsystem_dump_discover_filter(w);
	spdk_json_write_named_object_begin(w, "admin_cmd_passthru");
	spdk_json_write_named_bool(w, "identify_ctrlr",
				   g_spdk_nvmf_tgt_conf.admin_passthru.identify_ctrlr);
	spdk_json_write_object_end(w);
	/* poll_groups_mask is only emitted when one was configured. */
	if (g_poll_groups_mask) {
		spdk_json_write_named_string(w, "poll_groups_mask", spdk_cpuset_fmt(g_poll_groups_mask));
	}
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);

	/* The target library appends its own configuration (transports,
	 * subsystems, listeners, ...). */
	spdk_nvmf_tgt_write_config_json(w, g_spdk_nvmf_tgt);
	spdk_json_write_array_end(w);
}
575 :
/* Registration of the nvmf application subsystem with the SPDK event
 * framework, plus its init-ordering dependencies. */
static struct spdk_subsystem g_spdk_subsystem_nvmf = {
	.name = "nvmf",
	.init = nvmf_subsystem_init,
	.fini = nvmf_subsystem_fini,
	.write_config_json = nvmf_subsystem_write_config_json,
};

SPDK_SUBSYSTEM_REGISTER(g_spdk_subsystem_nvmf)
/* nvmf must initialize after bdev, keyring and sock. */
SPDK_SUBSYSTEM_DEPEND(nvmf, bdev)
SPDK_SUBSYSTEM_DEPEND(nvmf, keyring)
SPDK_SUBSYSTEM_DEPEND(nvmf, sock)
|