Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2016 Intel Corporation. All rights reserved.
3 : * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4 : * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "spdk/stdinc.h"
8 :
9 : #include "nvmf_internal.h"
10 : #include "transport.h"
11 :
12 : #include "spdk/assert.h"
13 : #include "spdk/likely.h"
14 : #include "spdk/string.h"
15 : #include "spdk/trace.h"
16 : #include "spdk/nvmf_spec.h"
17 : #include "spdk/uuid.h"
18 : #include "spdk/json.h"
19 : #include "spdk/file.h"
20 : #include "spdk/bit_array.h"
21 : #include "spdk/bdev.h"
22 :
23 : #define __SPDK_BDEV_MODULE_ONLY
24 : #include "spdk/bdev_module.h"
25 : #include "spdk/log.h"
26 : #include "spdk_internal/utf.h"
27 : #include "spdk_internal/usdt.h"
28 :
29 : #define MODEL_NUMBER_DEFAULT "SPDK bdev Controller"
30 : #define NVMF_SUBSYSTEM_DEFAULT_NAMESPACES 32
31 :
32 : /*
33 : * States for parsing valid domains in NQNs according to RFC 1034
34 : */
35 : enum spdk_nvmf_nqn_domain_states {
36 : /* First character of a domain must be a letter */
37 : SPDK_NVMF_DOMAIN_ACCEPT_LETTER = 0,
38 :
39 : /* Subsequent characters can be any of letter, digit, or hyphen */
40 : SPDK_NVMF_DOMAIN_ACCEPT_LDH = 1,
41 :
42 : /* A domain label must end with either a letter or digit */
43 : SPDK_NVMF_DOMAIN_ACCEPT_ANY = 2
44 : };
45 :
46 : static int _nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem);
47 :
48 : /* Returns true if the buffer is a valid ASCII string as defined by the NVMe spec */
49 : static bool
50 3 : nvmf_valid_ascii_string(const void *buf, size_t size)
51 : {
52 3 : const uint8_t *str = buf;
53 : size_t i;
54 :
55 35 : for (i = 0; i < size; i++) {
56 33 : if (str[i] < 0x20 || str[i] > 0x7E) {
57 1 : return false;
58 : }
59 : }
60 :
61 2 : return true;
62 : }
63 :
64 : bool
65 34 : nvmf_nqn_is_valid(const char *nqn)
66 : {
67 : size_t len;
68 34 : struct spdk_uuid uuid_value;
69 : uint32_t i;
70 : int bytes_consumed;
71 : uint32_t domain_label_length;
72 : char *reverse_domain_end;
73 : uint32_t reverse_domain_end_index;
74 34 : enum spdk_nvmf_nqn_domain_states domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER;
75 :
76 : /* Check for length requirements */
77 34 : len = strlen(nqn);
78 34 : if (len > SPDK_NVMF_NQN_MAX_LEN) {
79 1 : SPDK_ERRLOG("Invalid NQN \"%s\": length %zu > max %d\n", nqn, len, SPDK_NVMF_NQN_MAX_LEN);
80 1 : return false;
81 : }
82 :
83 : /* The nqn must be at least as long as SPDK_NVMF_NQN_MIN_LEN to contain the necessary prefix. */
84 33 : if (len < SPDK_NVMF_NQN_MIN_LEN) {
85 1 : SPDK_ERRLOG("Invalid NQN \"%s\": length %zu < min %d\n", nqn, len, SPDK_NVMF_NQN_MIN_LEN);
86 1 : return false;
87 : }
88 :
89 : /* Check for discovery controller nqn */
90 32 : if (!strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN)) {
91 2 : return true;
92 : }
93 :
94 : /* Check whether the NQN matches the UUID-based form "nqn.2014-08.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555" */
95 30 : if (!strncmp(nqn, SPDK_NVMF_NQN_UUID_PRE, SPDK_NVMF_NQN_UUID_PRE_LEN)) {
96 6 : if (len != SPDK_NVMF_NQN_UUID_PRE_LEN + SPDK_NVMF_UUID_STRING_LEN) {
97 2 : SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not the correct length\n", nqn);
98 2 : return false;
99 : }
100 :
101 4 : if (spdk_uuid_parse(&uuid_value, &nqn[SPDK_NVMF_NQN_UUID_PRE_LEN])) {
102 2 : SPDK_ERRLOG("Invalid NQN \"%s\": uuid is not formatted correctly\n", nqn);
103 2 : return false;
104 : }
105 2 : return true;
106 : }
107 :
108 : /* If the nqn does not match the uuid structure, the next several checks validate the form "nqn.yyyy-mm.reverse.domain:user-string" */
109 :
110 24 : if (strncmp(nqn, "nqn.", 4) != 0) {
111 0 : SPDK_ERRLOG("Invalid NQN \"%s\": NQN must begin with \"nqn.\".\n", nqn);
112 0 : return false;
113 : }
114 :
115 : /* Check for yyyy-mm. */
116 24 : if (!(isdigit(nqn[4]) && isdigit(nqn[5]) && isdigit(nqn[6]) && isdigit(nqn[7]) &&
117 24 : nqn[8] == '-' && isdigit(nqn[9]) && isdigit(nqn[10]) && nqn[11] == '.')) {
118 0 : SPDK_ERRLOG("Invalid date code in NQN \"%s\"\n", nqn);
119 0 : return false;
120 : }
121 :
122 24 : reverse_domain_end = strchr(nqn, ':');
123 24 : if (reverse_domain_end != NULL && (reverse_domain_end_index = reverse_domain_end - nqn) < len - 1) {
124 : } else {
125 1 : SPDK_ERRLOG("Invalid NQN \"%s\". NQN must contain a user-specified name prefixed by ':'.\n",
126 : nqn);
127 1 : return false;
128 : }
129 :
130 : /* Check for valid reverse domain */
131 23 : domain_label_length = 0;
132 250 : for (i = 12; i < reverse_domain_end_index; i++) {
133 233 : if (domain_label_length > SPDK_DOMAIN_LABEL_MAX_LEN) {
134 1 : SPDK_ERRLOG("Invalid domain name in NQN \"%s\". At least one Label is too long.\n", nqn);
135 1 : return false;
136 : }
137 :
138 232 : switch (domain_state) {
139 :
140 47 : case SPDK_NVMF_DOMAIN_ACCEPT_LETTER: {
141 47 : if (isalpha(nqn[i])) {
142 43 : domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
143 43 : domain_label_length++;
144 43 : break;
145 : } else {
146 4 : SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must start with a letter.\n", nqn);
147 4 : return false;
148 : }
149 : }
150 :
151 4 : case SPDK_NVMF_DOMAIN_ACCEPT_LDH: {
152 4 : if (isalpha(nqn[i]) || isdigit(nqn[i])) {
153 3 : domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
154 3 : domain_label_length++;
155 3 : break;
156 1 : } else if (nqn[i] == '-') {
157 1 : if (i == reverse_domain_end_index - 1) {
158 0 : SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
159 : nqn);
160 0 : return false;
161 : }
162 1 : domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH;
163 1 : domain_label_length++;
164 1 : break;
165 0 : } else if (nqn[i] == '.') {
166 0 : SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
167 : nqn);
168 0 : return false;
169 : } else {
170 0 : SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n",
171 : nqn);
172 0 : return false;
173 : }
174 : }
175 :
176 181 : case SPDK_NVMF_DOMAIN_ACCEPT_ANY: {
177 181 : if (isalpha(nqn[i]) || isdigit(nqn[i])) {
178 153 : domain_state = SPDK_NVMF_DOMAIN_ACCEPT_ANY;
179 153 : domain_label_length++;
180 153 : break;
181 28 : } else if (nqn[i] == '-') {
182 4 : if (i == reverse_domain_end_index - 1) {
183 1 : SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must end with an alphanumeric symbol.\n",
184 : nqn);
185 1 : return false;
186 : }
187 3 : domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LDH;
188 3 : domain_label_length++;
189 3 : break;
190 24 : } else if (nqn[i] == '.') {
191 24 : domain_state = SPDK_NVMF_DOMAIN_ACCEPT_LETTER;
192 24 : domain_label_length = 0;
193 24 : break;
194 : } else {
195 0 : SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only [a-z,A-Z,0-9,'-','.'].\n",
196 : nqn);
197 0 : return false;
198 : }
199 : }
200 : }
201 227 : }
202 :
203 17 : i = reverse_domain_end_index + 1;
204 348 : while (i < len) {
205 332 : bytes_consumed = utf8_valid(&nqn[i], &nqn[len]);
206 332 : if (bytes_consumed <= 0) {
207 1 : SPDK_ERRLOG("Invalid domain name in NQN \"%s\". Label names must contain only valid utf-8.\n", nqn);
208 1 : return false;
209 : }
210 :
211 331 : i += bytes_consumed;
212 : }
213 16 : return true;
214 : }
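/*
 * A minimal usage sketch for nvmf_nqn_is_valid() above; all example NQNs are
 * illustrative. The discovery NQN and the UUID-based form are accepted
 * outright; everything else must follow the reverse-domain rules.
 */
static void
nqn_validation_examples(void)
{
	/* Discovery NQN and UUID form. */
	assert(nvmf_nqn_is_valid("nqn.2014-08.org.nvmexpress.discovery"));
	assert(nvmf_nqn_is_valid(
		       "nqn.2014-08.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555"));

	/* Reverse-domain form: "nqn." + yyyy-mm + "." + domain labels + ":" + user string. */
	assert(nvmf_nqn_is_valid("nqn.2016-06.io.spdk:cnode1"));

	/* Rejected: a domain label starting with a digit, and a missing ':' separator. */
	assert(!nvmf_nqn_is_valid("nqn.2016-06.3io.spdk:cnode1"));
	assert(!nvmf_nqn_is_valid("nqn.2016-06.io.spdk.cnode1"));
}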
215 :
216 : static void subsystem_state_change_on_pg(struct spdk_io_channel_iter *i);
217 :
218 : struct spdk_nvmf_subsystem *
219 24 : spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt,
220 : const char *nqn,
221 : enum spdk_nvmf_subtype type,
222 : uint32_t num_ns)
223 : {
224 : struct spdk_nvmf_subsystem *subsystem;
225 : uint32_t sid;
226 :
227 24 : if (spdk_nvmf_tgt_find_subsystem(tgt, nqn)) {
228 0 : SPDK_ERRLOG("Subsystem NQN '%s' already exists\n", nqn);
229 0 : return NULL;
230 : }
231 :
232 24 : if (!nvmf_nqn_is_valid(nqn)) {
233 11 : return NULL;
234 : }
235 :
236 13 : if (type == SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT ||
237 : type == SPDK_NVMF_SUBTYPE_DISCOVERY) {
238 1 : if (num_ns != 0) {
239 0 : SPDK_ERRLOG("Discovery subsystem cannot have namespaces.\n");
240 0 : return NULL;
241 : }
242 12 : } else if (num_ns == 0) {
243 12 : num_ns = NVMF_SUBSYSTEM_DEFAULT_NAMESPACES;
244 : }
245 :
246 : /* Find a free subsystem id (sid) */
247 13 : sid = spdk_bit_array_find_first_clear(tgt->subsystem_ids, 0);
248 13 : if (sid == UINT32_MAX) {
249 0 : return NULL;
250 : }
251 13 : subsystem = calloc(1, sizeof(struct spdk_nvmf_subsystem));
252 13 : if (subsystem == NULL) {
253 0 : return NULL;
254 : }
255 :
256 13 : subsystem->thread = spdk_get_thread();
257 13 : subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
258 13 : subsystem->tgt = tgt;
259 13 : subsystem->id = sid;
260 13 : subsystem->subtype = type;
261 13 : subsystem->max_nsid = num_ns;
262 13 : subsystem->next_cntlid = 0;
263 13 : subsystem->min_cntlid = NVMF_MIN_CNTLID;
264 13 : subsystem->max_cntlid = NVMF_MAX_CNTLID;
265 13 : snprintf(subsystem->subnqn, sizeof(subsystem->subnqn), "%s", nqn);
266 13 : pthread_mutex_init(&subsystem->mutex, NULL);
267 13 : TAILQ_INIT(&subsystem->listeners);
268 13 : TAILQ_INIT(&subsystem->hosts);
269 13 : TAILQ_INIT(&subsystem->ctrlrs);
270 13 : TAILQ_INIT(&subsystem->state_changes);
271 13 : subsystem->used_listener_ids = spdk_bit_array_create(NVMF_MAX_LISTENERS_PER_SUBSYSTEM);
272 13 : if (subsystem->used_listener_ids == NULL) {
273 0 : pthread_mutex_destroy(&subsystem->mutex);
274 0 : free(subsystem);
275 0 : return NULL;
276 : }
277 :
278 13 : if (num_ns != 0) {
279 12 : subsystem->ns = calloc(num_ns, sizeof(struct spdk_nvmf_ns *));
280 12 : if (subsystem->ns == NULL) {
281 0 : SPDK_ERRLOG("Namespace memory allocation failed\n");
282 0 : pthread_mutex_destroy(&subsystem->mutex);
283 0 : spdk_bit_array_free(&subsystem->used_listener_ids);
284 0 : free(subsystem);
285 0 : return NULL;
286 : }
287 12 : subsystem->ana_group = calloc(num_ns, sizeof(uint32_t));
288 12 : if (subsystem->ana_group == NULL) {
289 0 : SPDK_ERRLOG("ANA group memory allocation failed\n");
290 0 : pthread_mutex_destroy(&subsystem->mutex);
291 0 : free(subsystem->ns);
292 0 : spdk_bit_array_free(&subsystem->used_listener_ids);
293 0 : free(subsystem);
294 0 : return NULL;
295 : }
296 : }
297 :
298 13 : memset(subsystem->sn, '0', sizeof(subsystem->sn) - 1);
299 13 : subsystem->sn[sizeof(subsystem->sn) - 1] = '\0';
300 :
301 13 : snprintf(subsystem->mn, sizeof(subsystem->mn), "%s",
302 : MODEL_NUMBER_DEFAULT);
303 :
304 13 : spdk_bit_array_set(tgt->subsystem_ids, sid);
305 13 : RB_INSERT(subsystem_tree, &tgt->subsystems, subsystem);
306 :
307 : SPDK_DTRACE_PROBE1(nvmf_subsystem_create, subsystem->subnqn);
308 :
309 13 : return subsystem;
310 : }
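/*
 * Caller-side sketch, assuming "tgt" is an existing spdk_nvmf_tgt and the
 * NQN is illustrative. The subsystem is created INACTIVE with the default
 * namespace count and then started; error handling is abbreviated.
 */
static void
subsystem_create_example(struct spdk_nvmf_tgt *tgt)
{
	struct spdk_nvmf_subsystem *subsystem;

	/* num_ns == 0 selects NVMF_SUBSYSTEM_DEFAULT_NAMESPACES (32). */
	subsystem = spdk_nvmf_subsystem_create(tgt, "nqn.2016-06.io.spdk:cnode1",
					       SPDK_NVMF_SUBTYPE_NVME, 0);
	if (subsystem == NULL) {
		return;
	}

	/* Transition INACTIVE -> ACTIVATING -> ACTIVE; completion is reported
	 * through the (optional) callback. */
	spdk_nvmf_subsystem_start(subsystem, NULL, NULL);
}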
311 :
312 : /* Must hold subsystem->mutex while calling this function */
313 : static void
314 3 : nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_host *host)
315 : {
316 3 : TAILQ_REMOVE(&subsystem->hosts, host, link);
317 3 : free(host);
318 3 : }
319 :
320 : static void
321 7 : _nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
322 : struct spdk_nvmf_subsystem_listener *listener,
323 : bool stop)
324 : {
325 : struct spdk_nvmf_transport *transport;
326 : struct spdk_nvmf_ctrlr *ctrlr;
327 :
328 7 : if (stop) {
329 0 : transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, listener->trid->trstring);
330 0 : if (transport != NULL) {
331 0 : spdk_nvmf_transport_stop_listen(transport, listener->trid);
332 : }
333 : }
334 :
335 7 : TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
336 0 : if (ctrlr->listener == listener) {
337 0 : ctrlr->listener = NULL;
338 : }
339 : }
340 :
341 7 : TAILQ_REMOVE(&subsystem->listeners, listener, link);
342 7 : nvmf_update_discovery_log(listener->subsystem->tgt, NULL);
343 7 : free(listener->ana_state);
344 7 : spdk_bit_array_clear(subsystem->used_listener_ids, listener->id);
345 7 : free(listener);
346 7 : }
347 :
348 : static void
349 0 : _nvmf_subsystem_destroy_msg(void *cb_arg)
350 : {
351 0 : struct spdk_nvmf_subsystem *subsystem = cb_arg;
352 :
353 0 : _nvmf_subsystem_destroy(subsystem);
354 0 : }
355 :
356 : static int
357 13 : _nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem)
358 : {
359 : struct nvmf_subsystem_state_change_ctx *ctx;
360 : struct spdk_nvmf_ns *ns;
361 13 : nvmf_subsystem_destroy_cb async_destroy_cb = NULL;
362 13 : void *async_destroy_cb_arg = NULL;
363 : int rc;
364 :
365 13 : if (!TAILQ_EMPTY(&subsystem->ctrlrs)) {
366 0 : SPDK_DEBUGLOG(nvmf, "subsystem %p %s has active controllers\n", subsystem, subsystem->subnqn);
367 0 : subsystem->async_destroy = true;
368 0 : rc = spdk_thread_send_msg(subsystem->thread, _nvmf_subsystem_destroy_msg, subsystem);
369 0 : if (rc) {
370 0 : SPDK_ERRLOG("Failed to send thread msg, rc %d\n", rc);
371 0 : assert(0);
372 : return rc;
373 : }
374 0 : return -EINPROGRESS;
375 : }
376 :
377 13 : ns = spdk_nvmf_subsystem_get_first_ns(subsystem);
378 13 : while (ns != NULL) {
379 0 : struct spdk_nvmf_ns *next_ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns);
380 :
381 0 : spdk_nvmf_subsystem_remove_ns(subsystem, ns->opts.nsid);
382 0 : ns = next_ns;
383 : }
384 :
385 13 : while ((ctx = TAILQ_FIRST(&subsystem->state_changes))) {
386 0 : SPDK_WARNLOG("subsystem %s has pending state change requests\n", subsystem->subnqn);
387 0 : TAILQ_REMOVE(&subsystem->state_changes, ctx, link);
388 0 : if (ctx->cb_fn != NULL) {
389 0 : ctx->cb_fn(subsystem, ctx->cb_arg, -ECANCELED);
390 : }
391 0 : free(ctx);
392 : }
393 :
394 13 : free(subsystem->ns);
395 13 : free(subsystem->ana_group);
396 :
397 13 : RB_REMOVE(subsystem_tree, &subsystem->tgt->subsystems, subsystem);
398 13 : assert(spdk_bit_array_get(subsystem->tgt->subsystem_ids, subsystem->id) == true);
399 13 : spdk_bit_array_clear(subsystem->tgt->subsystem_ids, subsystem->id);
400 :
401 13 : pthread_mutex_destroy(&subsystem->mutex);
402 :
403 13 : spdk_bit_array_free(&subsystem->used_listener_ids);
404 :
405 13 : if (subsystem->async_destroy) {
406 0 : async_destroy_cb = subsystem->async_destroy_cb;
407 0 : async_destroy_cb_arg = subsystem->async_destroy_cb_arg;
408 : }
409 :
410 13 : free(subsystem);
411 :
412 13 : if (async_destroy_cb) {
413 0 : async_destroy_cb(async_destroy_cb_arg);
414 : }
415 :
416 13 : return 0;
417 : }
418 :
419 : static struct spdk_nvmf_ns *
420 0 : _nvmf_subsystem_get_first_zoned_ns(struct spdk_nvmf_subsystem *subsystem)
421 : {
422 0 : struct spdk_nvmf_ns *ns = spdk_nvmf_subsystem_get_first_ns(subsystem);
423 0 : while (ns != NULL) {
424 0 : if (ns->csi == SPDK_NVME_CSI_ZNS) {
425 0 : return ns;
426 : }
427 0 : ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns);
428 : }
429 0 : return NULL;
430 : }
431 :
432 : int
433 13 : spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem, nvmf_subsystem_destroy_cb cpl_cb,
434 : void *cpl_cb_arg)
435 : {
436 : struct spdk_nvmf_host *host, *host_tmp;
437 : struct spdk_nvmf_transport *transport;
438 :
439 13 : if (!subsystem) {
440 0 : return -EINVAL;
441 : }
442 :
443 : SPDK_DTRACE_PROBE1(nvmf_subsystem_destroy, subsystem->subnqn);
444 :
445 13 : assert(spdk_get_thread() == subsystem->thread);
446 :
447 13 : if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
448 0 : SPDK_ERRLOG("Subsystem can only be destroyed in inactive state, %s state %d\n",
449 : subsystem->subnqn, subsystem->state);
450 0 : return -EAGAIN;
451 : }
452 13 : if (subsystem->destroying) {
453 0 : SPDK_ERRLOG("Subsystem destruction is already started\n");
454 0 : assert(0);
455 : return -EALREADY;
456 : }
457 :
458 13 : subsystem->destroying = true;
459 :
460 13 : SPDK_DEBUGLOG(nvmf, "subsystem is %p %s\n", subsystem, subsystem->subnqn);
461 :
462 13 : nvmf_subsystem_remove_all_listeners(subsystem, false);
463 :
464 13 : pthread_mutex_lock(&subsystem->mutex);
465 :
466 13 : TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) {
467 0 : for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
468 0 : transport = spdk_nvmf_transport_get_next(transport)) {
469 0 : if (transport->ops->subsystem_remove_host) {
470 0 : transport->ops->subsystem_remove_host(transport, subsystem, host->nqn);
471 : }
472 : }
473 0 : nvmf_subsystem_remove_host(subsystem, host);
474 : }
475 :
476 13 : pthread_mutex_unlock(&subsystem->mutex);
477 :
478 13 : subsystem->async_destroy_cb = cpl_cb;
479 13 : subsystem->async_destroy_cb_arg = cpl_cb_arg;
480 :
481 13 : return _nvmf_subsystem_destroy(subsystem);
482 : }
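/*
 * Teardown sketch, assuming the subsystem was already stopped (INACTIVE).
 * Destruction completes synchronously unless controllers are still attached,
 * in which case spdk_nvmf_subsystem_destroy() returns -EINPROGRESS and the
 * completion callback fires once the last controller is gone.
 */
static void
destroy_done(void *cb_arg)
{
	SPDK_NOTICELOG("subsystem destroyed\n");
}

static void
destroy_example(struct spdk_nvmf_subsystem *subsystem)
{
	int rc = spdk_nvmf_subsystem_destroy(subsystem, destroy_done, NULL);

	if (rc == 0) {
		/* Freed immediately; destroy_done() is not called in this case. */
	} else if (rc != -EINPROGRESS) {
		SPDK_ERRLOG("destroy failed: %d\n", rc);
	}
}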
483 :
484 : /* we have to use the typedef in the function declaration to appease astyle. */
485 : typedef enum spdk_nvmf_subsystem_state spdk_nvmf_subsystem_state_t;
486 :
487 : static spdk_nvmf_subsystem_state_t
488 9 : nvmf_subsystem_get_intermediate_state(enum spdk_nvmf_subsystem_state current_state,
489 : enum spdk_nvmf_subsystem_state requested_state)
490 : {
491 9 : switch (requested_state) {
492 2 : case SPDK_NVMF_SUBSYSTEM_INACTIVE:
493 2 : return SPDK_NVMF_SUBSYSTEM_DEACTIVATING;
494 4 : case SPDK_NVMF_SUBSYSTEM_ACTIVE:
495 4 : if (current_state == SPDK_NVMF_SUBSYSTEM_PAUSED) {
496 2 : return SPDK_NVMF_SUBSYSTEM_RESUMING;
497 : } else {
498 2 : return SPDK_NVMF_SUBSYSTEM_ACTIVATING;
499 : }
500 3 : case SPDK_NVMF_SUBSYSTEM_PAUSED:
501 3 : return SPDK_NVMF_SUBSYSTEM_PAUSING;
502 0 : default:
503 0 : assert(false);
504 : return SPDK_NVMF_SUBSYSTEM_NUM_STATES;
505 : }
506 : }
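/*
 * The mapping above, summarized (current -> requested : intermediate state):
 *
 *   any     -> INACTIVE : DEACTIVATING
 *   PAUSED  -> ACTIVE   : RESUMING
 *   other   -> ACTIVE   : ACTIVATING
 *   any     -> PAUSED   : PAUSING
 */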
507 :
508 : static int
509 18 : nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem,
510 : enum spdk_nvmf_subsystem_state state)
511 : {
512 18 : enum spdk_nvmf_subsystem_state actual_old_state, expected_old_state;
513 : bool exchanged;
514 :
515 18 : switch (state) {
516 2 : case SPDK_NVMF_SUBSYSTEM_INACTIVE:
517 2 : expected_old_state = SPDK_NVMF_SUBSYSTEM_DEACTIVATING;
518 2 : break;
519 2 : case SPDK_NVMF_SUBSYSTEM_ACTIVATING:
520 2 : expected_old_state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
521 2 : break;
522 4 : case SPDK_NVMF_SUBSYSTEM_ACTIVE:
523 4 : expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING;
524 4 : break;
525 3 : case SPDK_NVMF_SUBSYSTEM_PAUSING:
526 3 : expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
527 3 : break;
528 3 : case SPDK_NVMF_SUBSYSTEM_PAUSED:
529 3 : expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSING;
530 3 : break;
531 2 : case SPDK_NVMF_SUBSYSTEM_RESUMING:
532 2 : expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED;
533 2 : break;
534 2 : case SPDK_NVMF_SUBSYSTEM_DEACTIVATING:
535 2 : expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
536 2 : break;
537 0 : default:
538 0 : assert(false);
539 : return -1;
540 : }
541 :
542 18 : actual_old_state = expected_old_state;
543 18 : exchanged = __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false,
544 : __ATOMIC_RELAXED, __ATOMIC_RELAXED);
545 18 : if (spdk_unlikely(exchanged == false)) {
546 3 : if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING &&
547 : state == SPDK_NVMF_SUBSYSTEM_ACTIVE) {
548 2 : expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING;
549 : }
550 : /* This is for the case when activating the subsystem fails. */
551 3 : if (actual_old_state == SPDK_NVMF_SUBSYSTEM_ACTIVATING &&
552 : state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) {
553 0 : expected_old_state = SPDK_NVMF_SUBSYSTEM_ACTIVATING;
554 : }
555 : /* This is for the case when resuming the subsystem fails. */
556 3 : if (actual_old_state == SPDK_NVMF_SUBSYSTEM_RESUMING &&
557 : state == SPDK_NVMF_SUBSYSTEM_PAUSING) {
558 0 : expected_old_state = SPDK_NVMF_SUBSYSTEM_RESUMING;
559 : }
560 : /* This is for the case when stopping a paused subsystem. */
561 3 : if (actual_old_state == SPDK_NVMF_SUBSYSTEM_PAUSED &&
562 : state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING) {
563 1 : expected_old_state = SPDK_NVMF_SUBSYSTEM_PAUSED;
564 : }
565 3 : actual_old_state = expected_old_state;
566 3 : __atomic_compare_exchange_n(&subsystem->state, &actual_old_state, state, false,
567 : __ATOMIC_RELAXED, __ATOMIC_RELAXED);
568 : }
569 18 : assert(actual_old_state == expected_old_state);
570 18 : return actual_old_state - expected_old_state;
571 : }
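/*
 * Note on nvmf_subsystem_set_state(): the compare-exchange is used because
 * subsystem->state is also read from poll-group threads (see
 * subsystem_state_change_on_pg() below) without holding subsystem->mutex.
 * The retry block covers transitions that legitimately have more than one
 * valid predecessor state, e.g. both ACTIVE and PAUSED may move to
 * DEACTIVATING.
 */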
572 :
573 : static void nvmf_subsystem_do_state_change(struct nvmf_subsystem_state_change_ctx *ctx);
574 :
575 : static void
576 10 : _nvmf_subsystem_state_change_complete(void *_ctx)
577 : {
578 10 : struct nvmf_subsystem_state_change_ctx *next, *ctx = _ctx;
579 10 : struct spdk_nvmf_subsystem *subsystem = ctx->subsystem;
580 :
581 10 : pthread_mutex_lock(&subsystem->mutex);
582 10 : assert(TAILQ_FIRST(&subsystem->state_changes) == ctx);
583 10 : TAILQ_REMOVE(&subsystem->state_changes, ctx, link);
584 10 : next = TAILQ_FIRST(&subsystem->state_changes);
585 10 : pthread_mutex_unlock(&subsystem->mutex);
586 :
587 10 : if (ctx->cb_fn != NULL) {
588 3 : ctx->cb_fn(subsystem, ctx->cb_arg, ctx->status);
589 : }
590 10 : free(ctx);
591 :
592 10 : if (next != NULL) {
593 1 : nvmf_subsystem_do_state_change(next);
594 : }
595 10 : }
596 :
597 : static void
598 10 : nvmf_subsystem_state_change_complete(struct nvmf_subsystem_state_change_ctx *ctx, int status)
599 : {
600 10 : ctx->status = status;
601 10 : spdk_thread_exec_msg(ctx->thread, _nvmf_subsystem_state_change_complete, ctx);
602 10 : }
603 :
604 : static void
605 0 : subsystem_state_change_revert_done(struct spdk_io_channel_iter *i, int status)
606 : {
607 0 : struct nvmf_subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
608 :
609 : /* Nothing to be done here if the state setting fails, we are just screwed. */
610 0 : if (nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state)) {
611 0 : SPDK_ERRLOG("Unable to revert the subsystem state after operation failure.\n");
612 : }
613 :
614 : /* return a failure here. This function only exists in an error path. */
615 0 : nvmf_subsystem_state_change_complete(ctx, -1);
616 0 : }
617 :
618 : static void
619 9 : subsystem_state_change_done(struct spdk_io_channel_iter *i, int status)
620 : {
621 9 : struct nvmf_subsystem_state_change_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
622 : enum spdk_nvmf_subsystem_state intermediate_state;
623 :
624 : SPDK_DTRACE_PROBE4(nvmf_subsystem_change_state_done, ctx->subsystem->subnqn,
625 : ctx->requested_state, ctx->original_state, status);
626 :
627 9 : if (status == 0) {
628 9 : status = nvmf_subsystem_set_state(ctx->subsystem, ctx->requested_state);
629 9 : if (status) {
630 0 : status = -1;
631 : }
632 : }
633 :
634 9 : if (status) {
635 0 : intermediate_state = nvmf_subsystem_get_intermediate_state(ctx->requested_state,
636 : ctx->original_state);
637 0 : assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES);
638 :
639 0 : if (nvmf_subsystem_set_state(ctx->subsystem, intermediate_state)) {
640 0 : goto out;
641 : }
642 0 : ctx->requested_state = ctx->original_state;
643 0 : spdk_for_each_channel(ctx->subsystem->tgt,
644 : subsystem_state_change_on_pg,
645 : ctx,
646 : subsystem_state_change_revert_done);
647 0 : return;
648 : }
649 :
650 9 : out:
651 9 : nvmf_subsystem_state_change_complete(ctx, status);
652 : }
653 :
654 : static void
655 0 : subsystem_state_change_continue(void *ctx, int status)
656 : {
657 0 : struct spdk_io_channel_iter *i = ctx;
658 : struct nvmf_subsystem_state_change_ctx *_ctx __attribute__((unused));
659 :
660 0 : _ctx = spdk_io_channel_iter_get_ctx(i);
661 : SPDK_DTRACE_PROBE3(nvmf_pg_change_state_done, _ctx->subsystem->subnqn,
662 : _ctx->requested_state, spdk_thread_get_id(spdk_get_thread()));
663 :
664 0 : spdk_for_each_channel_continue(i, status);
665 0 : }
666 :
667 : static void
668 0 : subsystem_state_change_on_pg(struct spdk_io_channel_iter *i)
669 : {
670 : struct nvmf_subsystem_state_change_ctx *ctx;
671 : struct spdk_io_channel *ch;
672 : struct spdk_nvmf_poll_group *group;
673 :
674 0 : ctx = spdk_io_channel_iter_get_ctx(i);
675 0 : ch = spdk_io_channel_iter_get_channel(i);
676 0 : group = spdk_io_channel_get_ctx(ch);
677 :
678 : SPDK_DTRACE_PROBE3(nvmf_pg_change_state, ctx->subsystem->subnqn,
679 : ctx->requested_state, spdk_thread_get_id(spdk_get_thread()));
680 0 : switch (ctx->requested_state) {
681 0 : case SPDK_NVMF_SUBSYSTEM_INACTIVE:
682 0 : nvmf_poll_group_remove_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
683 0 : break;
684 0 : case SPDK_NVMF_SUBSYSTEM_ACTIVE:
685 0 : if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVATING) {
686 0 : nvmf_poll_group_add_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
687 0 : } else if (ctx->subsystem->state == SPDK_NVMF_SUBSYSTEM_RESUMING) {
688 0 : nvmf_poll_group_resume_subsystem(group, ctx->subsystem, subsystem_state_change_continue, i);
689 : }
690 0 : break;
691 0 : case SPDK_NVMF_SUBSYSTEM_PAUSED:
692 0 : nvmf_poll_group_pause_subsystem(group, ctx->subsystem, ctx->nsid, subsystem_state_change_continue,
693 : i);
694 0 : break;
695 0 : default:
696 0 : assert(false);
697 : break;
698 : }
699 0 : }
700 :
701 : static void
702 10 : nvmf_subsystem_do_state_change(struct nvmf_subsystem_state_change_ctx *ctx)
703 : {
704 10 : struct spdk_nvmf_subsystem *subsystem = ctx->subsystem;
705 : enum spdk_nvmf_subsystem_state intermediate_state;
706 : int rc;
707 :
708 : SPDK_DTRACE_PROBE3(nvmf_subsystem_change_state, subsystem->subnqn,
709 : ctx->requested_state, subsystem->state);
710 :
711 : /* If we are already in the requested state, just call the callback immediately. */
712 10 : if (subsystem->state == ctx->requested_state) {
713 1 : nvmf_subsystem_state_change_complete(ctx, 0);
714 1 : return;
715 : }
716 :
717 9 : intermediate_state = nvmf_subsystem_get_intermediate_state(subsystem->state,
718 : ctx->requested_state);
719 9 : assert(intermediate_state != SPDK_NVMF_SUBSYSTEM_NUM_STATES);
720 :
721 9 : ctx->original_state = subsystem->state;
722 9 : rc = nvmf_subsystem_set_state(subsystem, intermediate_state);
723 9 : if (rc) {
724 0 : nvmf_subsystem_state_change_complete(ctx, -1);
725 0 : return;
726 : }
727 :
728 9 : spdk_for_each_channel(subsystem->tgt,
729 : subsystem_state_change_on_pg,
730 : ctx,
731 : subsystem_state_change_done);
732 : }
733 :
734 :
735 : static int
736 10 : nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem,
737 : uint32_t nsid,
738 : enum spdk_nvmf_subsystem_state requested_state,
739 : spdk_nvmf_subsystem_state_change_done cb_fn,
740 : void *cb_arg)
741 : {
742 : struct nvmf_subsystem_state_change_ctx *ctx;
743 : struct spdk_thread *thread;
744 :
745 10 : thread = spdk_get_thread();
746 10 : if (thread == NULL) {
747 0 : return -EINVAL;
748 : }
749 :
750 10 : ctx = calloc(1, sizeof(*ctx));
751 10 : if (!ctx) {
752 0 : return -ENOMEM;
753 : }
754 :
755 10 : ctx->subsystem = subsystem;
756 10 : ctx->nsid = nsid;
757 10 : ctx->requested_state = requested_state;
758 10 : ctx->cb_fn = cb_fn;
759 10 : ctx->cb_arg = cb_arg;
760 10 : ctx->thread = thread;
761 :
762 10 : pthread_mutex_lock(&subsystem->mutex);
763 10 : TAILQ_INSERT_TAIL(&subsystem->state_changes, ctx, link);
764 10 : if (ctx != TAILQ_FIRST(&subsystem->state_changes)) {
765 1 : pthread_mutex_unlock(&subsystem->mutex);
766 1 : return 0;
767 : }
768 9 : pthread_mutex_unlock(&subsystem->mutex);
769 :
770 9 : nvmf_subsystem_do_state_change(ctx);
771 :
772 9 : return 0;
773 : }
774 :
775 : int
776 2 : spdk_nvmf_subsystem_start(struct spdk_nvmf_subsystem *subsystem,
777 : spdk_nvmf_subsystem_state_change_done cb_fn,
778 : void *cb_arg)
779 : {
780 2 : return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
781 : }
782 :
783 : int
784 3 : spdk_nvmf_subsystem_stop(struct spdk_nvmf_subsystem *subsystem,
785 : spdk_nvmf_subsystem_state_change_done cb_fn,
786 : void *cb_arg)
787 : {
788 3 : return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_INACTIVE, cb_fn, cb_arg);
789 : }
790 :
791 : int
792 3 : spdk_nvmf_subsystem_pause(struct spdk_nvmf_subsystem *subsystem,
793 : uint32_t nsid,
794 : spdk_nvmf_subsystem_state_change_done cb_fn,
795 : void *cb_arg)
796 : {
797 3 : return nvmf_subsystem_state_change(subsystem, nsid, SPDK_NVMF_SUBSYSTEM_PAUSED, cb_fn, cb_arg);
798 : }
799 :
800 : int
801 2 : spdk_nvmf_subsystem_resume(struct spdk_nvmf_subsystem *subsystem,
802 : spdk_nvmf_subsystem_state_change_done cb_fn,
803 : void *cb_arg)
804 : {
805 2 : return nvmf_subsystem_state_change(subsystem, 0, SPDK_NVMF_SUBSYSTEM_ACTIVE, cb_fn, cb_arg);
806 : }
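/*
 * Pause/resume sketch: the callback signature matches
 * spdk_nvmf_subsystem_state_change_done as used above. "subsystem" is
 * assumed ACTIVE and nsid 1 is illustrative; nsid 0 would skip quiescing
 * any namespace (see nvmf_ns_resize() further below).
 */
static void
pause_done(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	if (status != 0) {
		SPDK_ERRLOG("pause failed: %d\n", status);
		return;
	}

	/* ... modify the subsystem while it is quiesced ... */

	spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);
}

static void
pause_example(struct spdk_nvmf_subsystem *subsystem)
{
	if (spdk_nvmf_subsystem_pause(subsystem, 1, pause_done, NULL) != 0) {
		SPDK_ERRLOG("could not start pausing the subsystem\n");
	}
}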
807 :
808 : struct spdk_nvmf_subsystem *
809 33 : spdk_nvmf_subsystem_get_first(struct spdk_nvmf_tgt *tgt)
810 : {
811 33 : return RB_MIN(subsystem_tree, &tgt->subsystems);
812 : }
813 :
814 : struct spdk_nvmf_subsystem *
815 32 : spdk_nvmf_subsystem_get_next(struct spdk_nvmf_subsystem *subsystem)
816 : {
817 32 : if (!subsystem) {
818 0 : return NULL;
819 : }
820 :
821 32 : return RB_NEXT(subsystem_tree, &subsystem->tgt->subsystems, subsystem);
822 : }
823 :
824 : /* Must hold subsystem->mutex while calling this function */
825 : static struct spdk_nvmf_host *
826 14 : nvmf_subsystem_find_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
827 : {
828 14 : struct spdk_nvmf_host *host = NULL;
829 :
830 14 : TAILQ_FOREACH(host, &subsystem->hosts, link) {
831 9 : if (strcmp(hostnqn, host->nqn) == 0) {
832 9 : return host;
833 : }
834 : }
835 :
836 5 : return NULL;
837 : }
838 :
839 : int
840 4 : spdk_nvmf_subsystem_add_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn,
841 : const struct spdk_json_val *params)
842 : {
843 : struct spdk_nvmf_host *host;
844 : struct spdk_nvmf_transport *transport;
845 : int rc;
846 :
847 4 : if (!nvmf_nqn_is_valid(hostnqn)) {
848 0 : return -EINVAL;
849 : }
850 :
851 4 : pthread_mutex_lock(&subsystem->mutex);
852 :
853 4 : if (nvmf_subsystem_find_host(subsystem, hostnqn)) {
854 : /* This subsystem already allows the specified host. */
855 1 : pthread_mutex_unlock(&subsystem->mutex);
856 1 : return 0;
857 : }
858 :
859 3 : host = calloc(1, sizeof(*host));
860 3 : if (!host) {
861 0 : pthread_mutex_unlock(&subsystem->mutex);
862 0 : return -ENOMEM;
863 : }
864 :
865 3 : snprintf(host->nqn, sizeof(host->nqn), "%s", hostnqn);
866 :
867 : SPDK_DTRACE_PROBE2(nvmf_subsystem_add_host, subsystem->subnqn, host->nqn);
868 :
869 3 : TAILQ_INSERT_HEAD(&subsystem->hosts, host, link);
870 :
871 3 : if (!TAILQ_EMPTY(&subsystem->listeners)) {
872 0 : nvmf_update_discovery_log(subsystem->tgt, hostnqn);
873 : }
874 :
875 3 : for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
876 0 : transport = spdk_nvmf_transport_get_next(transport)) {
877 1 : if (transport->ops->subsystem_add_host) {
878 1 : rc = transport->ops->subsystem_add_host(transport, subsystem, hostnqn, params);
879 1 : if (rc) {
880 1 : SPDK_ERRLOG("Unable to add host to %s transport\n", transport->ops->name);
881 : /* Remove this host from all transports we've managed to add it to. */
882 1 : pthread_mutex_unlock(&subsystem->mutex);
883 1 : spdk_nvmf_subsystem_remove_host(subsystem, hostnqn);
884 1 : return rc;
885 : }
886 : }
887 : }
888 :
889 2 : pthread_mutex_unlock(&subsystem->mutex);
890 :
891 2 : return 0;
892 : }
893 :
894 : int
895 4 : spdk_nvmf_subsystem_remove_host(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
896 : {
897 : struct spdk_nvmf_host *host;
898 : struct spdk_nvmf_transport *transport;
899 :
900 4 : pthread_mutex_lock(&subsystem->mutex);
901 :
902 4 : host = nvmf_subsystem_find_host(subsystem, hostnqn);
903 4 : if (host == NULL) {
904 1 : pthread_mutex_unlock(&subsystem->mutex);
905 1 : return -ENOENT;
906 : }
907 :
908 : SPDK_DTRACE_PROBE2(nvmf_subsystem_remove_host, subsystem->subnqn, host->nqn);
909 :
910 3 : nvmf_subsystem_remove_host(subsystem, host);
911 :
912 3 : if (!TAILQ_EMPTY(&subsystem->listeners)) {
913 1 : nvmf_update_discovery_log(subsystem->tgt, hostnqn);
914 : }
915 :
916 4 : for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
917 1 : transport = spdk_nvmf_transport_get_next(transport)) {
918 1 : if (transport->ops->subsystem_remove_host) {
919 0 : transport->ops->subsystem_remove_host(transport, subsystem, hostnqn);
920 : }
921 : }
922 :
923 3 : pthread_mutex_unlock(&subsystem->mutex);
924 :
925 3 : return 0;
926 : }
927 :
928 : struct nvmf_subsystem_disconnect_host_ctx {
929 : struct spdk_nvmf_subsystem *subsystem;
930 : char *hostnqn;
931 : spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
932 : void *cb_arg;
933 : };
934 :
935 : static void
936 0 : nvmf_subsystem_disconnect_host_fini(struct spdk_io_channel_iter *i, int status)
937 : {
938 : struct nvmf_subsystem_disconnect_host_ctx *ctx;
939 :
940 0 : ctx = spdk_io_channel_iter_get_ctx(i);
941 :
942 0 : if (ctx->cb_fn) {
943 0 : ctx->cb_fn(ctx->cb_arg, status);
944 : }
945 0 : free(ctx->hostnqn);
946 0 : free(ctx);
947 0 : }
948 :
949 : static void
950 0 : nvmf_subsystem_disconnect_qpairs_by_host(struct spdk_io_channel_iter *i)
951 : {
952 : struct nvmf_subsystem_disconnect_host_ctx *ctx;
953 : struct spdk_nvmf_poll_group *group;
954 : struct spdk_io_channel *ch;
955 : struct spdk_nvmf_qpair *qpair, *tmp_qpair;
956 : struct spdk_nvmf_ctrlr *ctrlr;
957 :
958 0 : ctx = spdk_io_channel_iter_get_ctx(i);
959 0 : ch = spdk_io_channel_iter_get_channel(i);
960 0 : group = spdk_io_channel_get_ctx(ch);
961 :
962 0 : TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
963 0 : ctrlr = qpair->ctrlr;
964 :
965 0 : if (ctrlr == NULL || ctrlr->subsys != ctx->subsystem) {
966 0 : continue;
967 : }
968 :
969 0 : if (strncmp(ctrlr->hostnqn, ctx->hostnqn, sizeof(ctrlr->hostnqn)) == 0) {
970 : /* Right now this does not wait for the queue pairs to actually disconnect. */
971 0 : spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
972 : }
973 : }
974 0 : spdk_for_each_channel_continue(i, 0);
975 0 : }
976 :
977 : int
978 0 : spdk_nvmf_subsystem_disconnect_host(struct spdk_nvmf_subsystem *subsystem,
979 : const char *hostnqn,
980 : spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
981 : void *cb_arg)
982 : {
983 : struct nvmf_subsystem_disconnect_host_ctx *ctx;
984 :
985 0 : ctx = calloc(1, sizeof(struct nvmf_subsystem_disconnect_host_ctx));
986 0 : if (ctx == NULL) {
987 0 : return -ENOMEM;
988 : }
989 :
990 0 : ctx->hostnqn = strdup(hostnqn);
991 0 : if (ctx->hostnqn == NULL) {
992 0 : free(ctx);
993 0 : return -ENOMEM;
994 : }
995 :
996 0 : ctx->subsystem = subsystem;
997 0 : ctx->cb_fn = cb_fn;
998 0 : ctx->cb_arg = cb_arg;
999 :
1000 0 : spdk_for_each_channel(subsystem->tgt, nvmf_subsystem_disconnect_qpairs_by_host, ctx,
1001 : nvmf_subsystem_disconnect_host_fini);
1002 :
1003 0 : return 0;
1004 : }
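/*
 * Host allow-list sketch. The host NQN is illustrative; "params" (the
 * transport-specific JSON options of spdk_nvmf_subsystem_add_host()) is left
 * NULL. Removing a host does not drop its existing connections, which is why
 * spdk_nvmf_subsystem_disconnect_host() exists as a separate step.
 */
static int
host_allow_list_example(struct spdk_nvmf_subsystem *subsystem)
{
	const char *hostnqn = "nqn.2016-06.io.spdk:host1";
	int rc;

	rc = spdk_nvmf_subsystem_add_host(subsystem, hostnqn, NULL);
	if (rc != 0) {
		return rc;
	}

	/* ... later, revoke access and tear down any live queue pairs ... */
	rc = spdk_nvmf_subsystem_remove_host(subsystem, hostnqn);
	if (rc != 0) {
		return rc;
	}

	return spdk_nvmf_subsystem_disconnect_host(subsystem, hostnqn, NULL, NULL);
}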
1005 :
1006 : int
1007 0 : spdk_nvmf_subsystem_set_allow_any_host(struct spdk_nvmf_subsystem *subsystem, bool allow_any_host)
1008 : {
1009 0 : pthread_mutex_lock(&subsystem->mutex);
1010 0 : subsystem->flags.allow_any_host = allow_any_host;
1011 0 : if (!TAILQ_EMPTY(&subsystem->listeners)) {
1012 0 : nvmf_update_discovery_log(subsystem->tgt, NULL);
1013 : }
1014 0 : pthread_mutex_unlock(&subsystem->mutex);
1015 :
1016 0 : return 0;
1017 : }
1018 :
1019 : bool
1020 0 : spdk_nvmf_subsystem_get_allow_any_host(const struct spdk_nvmf_subsystem *subsystem)
1021 : {
1022 : bool allow_any_host;
1023 : struct spdk_nvmf_subsystem *sub;
1024 :
1025 : /* Technically, taking the mutex modifies data in the subsystem. But the const
1026 : * is still important to convey that this doesn't mutate any other data. Cast
1027 : * it away to work around this. */
1028 0 : sub = (struct spdk_nvmf_subsystem *)subsystem;
1029 :
1030 0 : pthread_mutex_lock(&sub->mutex);
1031 0 : allow_any_host = sub->flags.allow_any_host;
1032 0 : pthread_mutex_unlock(&sub->mutex);
1033 :
1034 0 : return allow_any_host;
1035 : }
1036 :
1037 : bool
1038 31 : spdk_nvmf_subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
1039 : {
1040 : bool allowed;
1041 :
1042 31 : if (!hostnqn) {
1043 0 : return false;
1044 : }
1045 :
1046 31 : pthread_mutex_lock(&subsystem->mutex);
1047 :
1048 31 : if (subsystem->flags.allow_any_host) {
1049 25 : pthread_mutex_unlock(&subsystem->mutex);
1050 25 : return true;
1051 : }
1052 :
1053 6 : allowed = nvmf_subsystem_find_host(subsystem, hostnqn) != NULL;
1054 6 : pthread_mutex_unlock(&subsystem->mutex);
1055 :
1056 6 : return allowed;
1057 : }
1058 :
1059 : struct spdk_nvmf_host *
1060 0 : spdk_nvmf_subsystem_get_first_host(struct spdk_nvmf_subsystem *subsystem)
1061 : {
1062 0 : return TAILQ_FIRST(&subsystem->hosts);
1063 : }
1064 :
1065 :
1066 : struct spdk_nvmf_host *
1067 0 : spdk_nvmf_subsystem_get_next_host(struct spdk_nvmf_subsystem *subsystem,
1068 : struct spdk_nvmf_host *prev_host)
1069 : {
1070 0 : return TAILQ_NEXT(prev_host, link);
1071 : }
1072 :
1073 : const char *
1074 0 : spdk_nvmf_host_get_nqn(const struct spdk_nvmf_host *host)
1075 : {
1076 0 : return host->nqn;
1077 : }
1078 :
1079 : struct spdk_nvmf_subsystem_listener *
1080 7 : nvmf_subsystem_find_listener(struct spdk_nvmf_subsystem *subsystem,
1081 : const struct spdk_nvme_transport_id *trid)
1082 : {
1083 : struct spdk_nvmf_subsystem_listener *listener;
1084 :
1085 22 : TAILQ_FOREACH(listener, &subsystem->listeners, link) {
1086 15 : if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
1087 0 : return listener;
1088 : }
1089 : }
1090 :
1091 7 : return NULL;
1092 : }
1093 :
1094 : /**
1095 : * Function to be called once the target is listening.
1096 : *
1097 : * \param ctx Context argument passed to this function.
1098 : * \param status 0 if it completed successfully, or negative errno if it failed.
1099 : */
1100 : static void
1101 7 : _nvmf_subsystem_add_listener_done(void *ctx, int status)
1102 : {
1103 7 : struct spdk_nvmf_subsystem_listener *listener = ctx;
1104 :
1105 7 : if (status) {
1106 0 : listener->cb_fn(listener->cb_arg, status);
1107 0 : free(listener);
1108 0 : return;
1109 : }
1110 :
1111 7 : TAILQ_INSERT_HEAD(&listener->subsystem->listeners, listener, link);
1112 7 : nvmf_update_discovery_log(listener->subsystem->tgt, NULL);
1113 7 : listener->cb_fn(listener->cb_arg, status);
1114 : }
1115 :
1116 : void
1117 7 : spdk_nvmf_subsystem_listener_opts_init(struct spdk_nvmf_listener_opts *opts, size_t size)
1118 : {
1119 7 : if (opts == NULL) {
1120 0 : SPDK_ERRLOG("opts should not be NULL\n");
1121 0 : assert(false);
1122 : return;
1123 : }
1124 7 : if (size == 0) {
1125 0 : SPDK_ERRLOG("size should not be zero\n");
1126 0 : assert(false);
1127 : return;
1128 : }
1129 :
1130 7 : memset(opts, 0, size);
1131 7 : opts->opts_size = size;
1132 :
1133 : #define FIELD_OK(field) \
1134 : offsetof(struct spdk_nvmf_listener_opts, field) + sizeof(opts->field) <= size
1135 :
1136 : #define SET_FIELD(field, value) \
1137 : if (FIELD_OK(field)) { \
1138 : opts->field = value; \
1139 : } \
1140 :
1141 7 : SET_FIELD(secure_channel, false);
1142 7 : SET_FIELD(ana_state, SPDK_NVME_ANA_OPTIMIZED_STATE);
1143 :
1144 : #undef FIELD_OK
1145 : #undef SET_FIELD
1146 : }
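/*
 * Caller-side sketch for the opts pattern above: the caller passes
 * sizeof(*opts) so the library only touches fields that exist in the
 * caller's (possibly older) copy of the structure. The values set here are
 * the defaults and purely illustrative.
 */
static void
listener_opts_example(struct spdk_nvmf_listener_opts *opts)
{
	spdk_nvmf_subsystem_listener_opts_init(opts, sizeof(*opts));
	opts->secure_channel = false;
	opts->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	/* pass opts to spdk_nvmf_subsystem_add_listener_ext() */
}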
1147 :
1148 : static int
1149 0 : listener_opts_copy(struct spdk_nvmf_listener_opts *src, struct spdk_nvmf_listener_opts *dst)
1150 : {
1151 0 : if (src->opts_size == 0) {
1152 0 : SPDK_ERRLOG("source structure size should not be zero\n");
1153 0 : assert(false);
1154 : return -EINVAL;
1155 : }
1156 :
1157 0 : memset(dst, 0, sizeof(*dst));
1158 0 : dst->opts_size = src->opts_size;
1159 :
1160 : #define FIELD_OK(field) \
1161 : offsetof(struct spdk_nvmf_listener_opts, field) + sizeof(src->field) <= src->opts_size
1162 :
1163 : #define SET_FIELD(field) \
1164 : if (FIELD_OK(field)) { \
1165 : dst->field = src->field; \
1166 : } \
1167 :
1168 0 : SET_FIELD(secure_channel);
1169 0 : SET_FIELD(ana_state);
1170 : /* We should not remove this statement, but need to update the assert statement
1171 : * if we add a new field, and also add a corresponding SET_FIELD statement. */
1172 : SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_listener_opts) == 16, "Incorrect size");
1173 :
1174 : #undef SET_FIELD
1175 : #undef FIELD_OK
1176 :
1177 0 : return 0;
1178 : }
1179 :
1180 : static void
1181 7 : _nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
1182 : struct spdk_nvme_transport_id *trid,
1183 : spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
1184 : void *cb_arg, struct spdk_nvmf_listener_opts *opts)
1185 : {
1186 : struct spdk_nvmf_transport *transport;
1187 : struct spdk_nvmf_subsystem_listener *listener;
1188 : struct spdk_nvmf_listener *tr_listener;
1189 : uint32_t i;
1190 : uint32_t id;
1191 7 : int rc = 0;
1192 :
1193 7 : assert(cb_fn != NULL);
1194 :
1195 7 : if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
1196 0 : subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
1197 0 : cb_fn(cb_arg, -EAGAIN);
1198 0 : return;
1199 : }
1200 :
1201 7 : if (nvmf_subsystem_find_listener(subsystem, trid)) {
1202 : /* Listener already exists in this subsystem */
1203 0 : cb_fn(cb_arg, 0);
1204 0 : return;
1205 : }
1206 :
1207 7 : transport = spdk_nvmf_tgt_get_transport(subsystem->tgt, trid->trstring);
1208 7 : if (!transport) {
1209 0 : SPDK_ERRLOG("Unable to find %s transport. The transport must be created first also make sure it is properly registered.\n",
1210 : trid->trstring);
1211 0 : cb_fn(cb_arg, -EINVAL);
1212 0 : return;
1213 : }
1214 :
1215 7 : tr_listener = nvmf_transport_find_listener(transport, trid);
1216 7 : if (!tr_listener) {
1217 0 : SPDK_ERRLOG("Cannot find transport listener for %s\n", trid->traddr);
1218 0 : cb_fn(cb_arg, -EINVAL);
1219 0 : return;
1220 : }
1221 :
1222 7 : listener = calloc(1, sizeof(*listener));
1223 7 : if (!listener) {
1224 0 : cb_fn(cb_arg, -ENOMEM);
1225 0 : return;
1226 : }
1227 :
1228 7 : listener->trid = &tr_listener->trid;
1229 7 : listener->transport = transport;
1230 7 : listener->cb_fn = cb_fn;
1231 7 : listener->cb_arg = cb_arg;
1232 7 : listener->subsystem = subsystem;
1233 7 : listener->ana_state = calloc(subsystem->max_nsid, sizeof(enum spdk_nvme_ana_state));
1234 7 : if (!listener->ana_state) {
1235 0 : free(listener);
1236 0 : cb_fn(cb_arg, -ENOMEM);
1237 0 : return;
1238 : }
1239 :
1240 7 : spdk_nvmf_subsystem_listener_opts_init(&listener->opts, sizeof(listener->opts));
1241 7 : if (opts != NULL) {
1242 0 : rc = listener_opts_copy(opts, &listener->opts);
1243 0 : if (rc) {
1244 0 : SPDK_ERRLOG("Unable to copy listener options\n");
1245 0 : free(listener->ana_state);
1246 0 : free(listener);
1247 0 : cb_fn(cb_arg, -EINVAL);
1248 0 : return;
1249 : }
1250 : }
1251 :
1252 7 : id = spdk_bit_array_find_first_clear(subsystem->used_listener_ids, 0);
1253 7 : if (id == UINT32_MAX) {
1254 0 : SPDK_ERRLOG("Cannot add any more listeners\n");
1255 0 : free(listener->ana_state);
1256 0 : free(listener);
1257 0 : cb_fn(cb_arg, -EINVAL);
1258 0 : return;
1259 : }
1260 :
1261 7 : spdk_bit_array_set(subsystem->used_listener_ids, id);
1262 7 : listener->id = id;
1263 :
1264 231 : for (i = 0; i < subsystem->max_nsid; i++) {
1265 224 : listener->ana_state[i] = listener->opts.ana_state;
1266 : }
1267 :
1268 7 : if (transport->ops->listen_associate != NULL) {
1269 0 : rc = transport->ops->listen_associate(transport, subsystem, trid);
1270 : }
1271 :
1272 : SPDK_DTRACE_PROBE4(nvmf_subsystem_add_listener, subsystem->subnqn, listener->trid->trtype,
1273 : listener->trid->traddr, listener->trid->trsvcid);
1274 :
1275 7 : _nvmf_subsystem_add_listener_done(listener, rc);
1276 : }
1277 :
1278 : void
1279 7 : spdk_nvmf_subsystem_add_listener(struct spdk_nvmf_subsystem *subsystem,
1280 : struct spdk_nvme_transport_id *trid,
1281 : spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
1282 : void *cb_arg)
1283 : {
1284 7 : _nvmf_subsystem_add_listener(subsystem, trid, cb_fn, cb_arg, NULL);
1285 7 : }
1286 :
1287 : void
1288 0 : spdk_nvmf_subsystem_add_listener_ext(struct spdk_nvmf_subsystem *subsystem,
1289 : struct spdk_nvme_transport_id *trid,
1290 : spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
1291 : void *cb_arg, struct spdk_nvmf_listener_opts *opts)
1292 : {
1293 0 : _nvmf_subsystem_add_listener(subsystem, trid, cb_fn, cb_arg, opts);
1294 0 : }
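/*
 * Listener sketch with assumptions: a TCP transport was already added to the
 * target and the target is already listening on this address (the code above
 * requires nvmf_transport_find_listener() to succeed), and the address/port
 * are illustrative. spdk_nvme_trid_populate_transport() from spdk/nvme.h
 * could fill trtype/trstring instead of setting them by hand.
 */
static void
listen_done(void *cb_arg, int status)
{
	if (status != 0) {
		SPDK_ERRLOG("failed to add subsystem listener: %d\n", status);
	}
}

static void
add_listener_example(struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvme_transport_id trid = {};

	trid.trtype = SPDK_NVME_TRANSPORT_TCP;
	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
	snprintf(trid.trstring, sizeof(trid.trstring), "%s", "TCP");
	snprintf(trid.traddr, sizeof(trid.traddr), "%s", "127.0.0.1");
	snprintf(trid.trsvcid, sizeof(trid.trsvcid), "%s", "4420");

	spdk_nvmf_subsystem_add_listener(subsystem, &trid, listen_done, NULL);
}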
1295 :
1296 : int
1297 0 : spdk_nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
1298 : const struct spdk_nvme_transport_id *trid)
1299 : {
1300 : struct spdk_nvmf_subsystem_listener *listener;
1301 :
1302 0 : if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
1303 0 : subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
1304 0 : return -EAGAIN;
1305 : }
1306 :
1307 0 : listener = nvmf_subsystem_find_listener(subsystem, trid);
1308 0 : if (listener == NULL) {
1309 0 : return -ENOENT;
1310 : }
1311 :
1312 : SPDK_DTRACE_PROBE4(nvmf_subsystem_remove_listener, subsystem->subnqn, listener->trid->trtype,
1313 : listener->trid->traddr, listener->trid->trsvcid);
1314 :
1315 0 : _nvmf_subsystem_remove_listener(subsystem, listener, false);
1316 :
1317 0 : return 0;
1318 : }
1319 :
1320 : void
1321 13 : nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
1322 : bool stop)
1323 : {
1324 : struct spdk_nvmf_subsystem_listener *listener, *listener_tmp;
1325 :
1326 20 : TAILQ_FOREACH_SAFE(listener, &subsystem->listeners, link, listener_tmp) {
1327 7 : _nvmf_subsystem_remove_listener(subsystem, listener, stop);
1328 : }
1329 13 : }
1330 :
1331 : bool
1332 0 : spdk_nvmf_subsystem_listener_allowed(struct spdk_nvmf_subsystem *subsystem,
1333 : const struct spdk_nvme_transport_id *trid)
1334 : {
1335 : struct spdk_nvmf_subsystem_listener *listener;
1336 :
1337 0 : TAILQ_FOREACH(listener, &subsystem->listeners, link) {
1338 0 : if (spdk_nvme_transport_id_compare(listener->trid, trid) == 0) {
1339 0 : return true;
1340 : }
1341 : }
1342 :
1343 0 : if (!strcmp(subsystem->subnqn, SPDK_NVMF_DISCOVERY_NQN)) {
1344 0 : SPDK_WARNLOG("Allowing connection to discovery subsystem on %s/%s/%s, "
1345 : "even though this listener was not added to the discovery "
1346 : "subsystem. This behavior is deprecated and will be removed "
1347 : "in a future release.\n",
1348 : spdk_nvme_transport_id_trtype_str(trid->trtype), trid->traddr, trid->trsvcid);
1349 0 : return true;
1350 : }
1351 :
1352 0 : return false;
1353 : }
1354 :
1355 : struct spdk_nvmf_subsystem_listener *
1356 30 : spdk_nvmf_subsystem_get_first_listener(struct spdk_nvmf_subsystem *subsystem)
1357 : {
1358 30 : return TAILQ_FIRST(&subsystem->listeners);
1359 : }
1360 :
1361 : struct spdk_nvmf_subsystem_listener *
1362 155 : spdk_nvmf_subsystem_get_next_listener(struct spdk_nvmf_subsystem *subsystem,
1363 : struct spdk_nvmf_subsystem_listener *prev_listener)
1364 : {
1365 155 : return TAILQ_NEXT(prev_listener, link);
1366 : }
1367 :
1368 : const struct spdk_nvme_transport_id *
1369 0 : spdk_nvmf_subsystem_listener_get_trid(struct spdk_nvmf_subsystem_listener *listener)
1370 : {
1371 0 : return listener->trid;
1372 : }
1373 :
1374 : void
1375 0 : spdk_nvmf_subsystem_allow_any_listener(struct spdk_nvmf_subsystem *subsystem,
1376 : bool allow_any_listener)
1377 : {
1378 0 : subsystem->flags.allow_any_listener = allow_any_listener;
1379 0 : }
1380 :
1381 2 : SPDK_LOG_DEPRECATION_REGISTER(spdk_nvmf_subsytem_any_listener_allowed,
1382 : "spdk_nvmf_subsytem_any_listener_allowed is deprecated", "v24.05", 0);
1383 :
1384 : bool
1385 0 : spdk_nvmf_subsytem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem)
1386 : {
1387 0 : SPDK_LOG_DEPRECATED(spdk_nvmf_subsytem_any_listener_allowed);
1388 0 : return subsystem->flags.allow_any_listener;
1389 : }
1390 :
1391 : bool
1392 0 : spdk_nvmf_subsystem_any_listener_allowed(struct spdk_nvmf_subsystem *subsystem)
1393 : {
1394 0 : return subsystem->flags.allow_any_listener;
1395 : }
1396 :
1397 : struct subsystem_update_ns_ctx {
1398 : struct spdk_nvmf_subsystem *subsystem;
1399 :
1400 : spdk_nvmf_subsystem_state_change_done cb_fn;
1401 : void *cb_arg;
1402 : };
1403 :
1404 : static void
1405 0 : subsystem_update_ns_done(struct spdk_io_channel_iter *i, int status)
1406 : {
1407 0 : struct subsystem_update_ns_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
1408 :
1409 0 : if (ctx->cb_fn) {
1410 0 : ctx->cb_fn(ctx->subsystem, ctx->cb_arg, status);
1411 : }
1412 0 : free(ctx);
1413 0 : }
1414 :
1415 : static void
1416 0 : subsystem_update_ns_on_pg(struct spdk_io_channel_iter *i)
1417 : {
1418 : int rc;
1419 : struct subsystem_update_ns_ctx *ctx;
1420 : struct spdk_nvmf_poll_group *group;
1421 : struct spdk_nvmf_subsystem *subsystem;
1422 :
1423 0 : ctx = spdk_io_channel_iter_get_ctx(i);
1424 0 : group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));
1425 0 : subsystem = ctx->subsystem;
1426 :
1427 0 : rc = nvmf_poll_group_update_subsystem(group, subsystem);
1428 0 : spdk_for_each_channel_continue(i, rc);
1429 0 : }
1430 :
1431 : static int
1432 0 : nvmf_subsystem_update_ns(struct spdk_nvmf_subsystem *subsystem,
1433 : spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg)
1434 : {
1435 : struct subsystem_update_ns_ctx *ctx;
1436 :
1437 0 : ctx = calloc(1, sizeof(*ctx));
1438 0 : if (ctx == NULL) {
1439 0 : SPDK_ERRLOG("Can't alloc subsystem poll group update context\n");
1440 0 : return -ENOMEM;
1441 : }
1442 0 : ctx->subsystem = subsystem;
1443 0 : ctx->cb_fn = cb_fn;
1444 0 : ctx->cb_arg = cb_arg;
1445 :
1446 0 : spdk_for_each_channel(subsystem->tgt,
1447 : subsystem_update_ns_on_pg,
1448 : ctx,
1449 : subsystem_update_ns_done);
1450 0 : return 0;
1451 : }
1452 :
1453 : static void
1454 7 : nvmf_subsystem_ns_changed(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
1455 : {
1456 : struct spdk_nvmf_ctrlr *ctrlr;
1457 :
1458 9 : TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
1459 2 : nvmf_ctrlr_ns_changed(ctrlr, nsid);
1460 : }
1461 7 : }
1462 :
1463 : static uint32_t nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns);
1464 :
1465 : int
1466 3 : spdk_nvmf_subsystem_remove_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
1467 : {
1468 : struct spdk_nvmf_transport *transport;
1469 : struct spdk_nvmf_ns *ns;
1470 :
1471 3 : if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
1472 1 : subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
1473 0 : assert(false);
1474 : return -1;
1475 : }
1476 :
1477 3 : if (nsid == 0 || nsid > subsystem->max_nsid) {
1478 0 : return -1;
1479 : }
1480 :
1481 3 : ns = subsystem->ns[nsid - 1];
1482 3 : if (!ns) {
1483 0 : return -1;
1484 : }
1485 :
1486 3 : subsystem->ns[nsid - 1] = NULL;
1487 :
1488 3 : assert(ns->anagrpid - 1 < subsystem->max_nsid);
1489 3 : assert(subsystem->ana_group[ns->anagrpid - 1] > 0);
1490 :
1491 3 : subsystem->ana_group[ns->anagrpid - 1]--;
1492 :
1493 3 : free(ns->ptpl_file);
1494 3 : nvmf_ns_reservation_clear_all_registrants(ns);
1495 3 : spdk_bdev_module_release_bdev(ns->bdev);
1496 3 : spdk_bdev_close(ns->desc);
1497 3 : free(ns);
1498 :
1499 3 : for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
1500 0 : transport = spdk_nvmf_transport_get_next(transport)) {
1501 0 : if (transport->ops->subsystem_remove_ns) {
1502 0 : transport->ops->subsystem_remove_ns(transport, subsystem, nsid);
1503 : }
1504 : }
1505 :
1506 3 : nvmf_subsystem_ns_changed(subsystem, nsid);
1507 :
1508 3 : return 0;
1509 : }
1510 :
1511 : struct subsystem_ns_change_ctx {
1512 : struct spdk_nvmf_subsystem *subsystem;
1513 : spdk_nvmf_subsystem_state_change_done cb_fn;
1514 : uint32_t nsid;
1515 : };
1516 :
1517 : static void
1518 1 : _nvmf_ns_hot_remove(struct spdk_nvmf_subsystem *subsystem,
1519 : void *cb_arg, int status)
1520 : {
1521 1 : struct subsystem_ns_change_ctx *ctx = cb_arg;
1522 : int rc;
1523 :
1524 1 : rc = spdk_nvmf_subsystem_remove_ns(subsystem, ctx->nsid);
1525 1 : if (rc != 0) {
1526 0 : SPDK_ERRLOG("Failed to make changes to NVME-oF subsystem with id: %u\n", subsystem->id);
1527 : }
1528 :
1529 1 : rc = spdk_nvmf_subsystem_resume(subsystem, NULL, NULL);
1530 1 : if (rc != 0) {
1531 0 : SPDK_ERRLOG("Failed to resume NVME-oF subsystem with id: %u\n", subsystem->id);
1532 : }
1533 :
1534 1 : free(ctx);
1535 1 : }
1536 :
1537 : static void
1538 0 : nvmf_ns_change_msg(void *ns_ctx)
1539 : {
1540 0 : struct subsystem_ns_change_ctx *ctx = ns_ctx;
1541 : int rc;
1542 :
1543 : SPDK_DTRACE_PROBE2(nvmf_ns_change, ctx->nsid, ctx->subsystem->subnqn);
1544 :
1545 0 : rc = spdk_nvmf_subsystem_pause(ctx->subsystem, ctx->nsid, ctx->cb_fn, ctx);
1546 0 : if (rc) {
1547 0 : if (rc == -EBUSY) {
1548 : /* Try again, this is not a permanent situation. */
1549 0 : spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ctx);
1550 : } else {
1551 0 : free(ctx);
1552 0 : SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
1553 : }
1554 : }
1555 0 : }
1556 :
1557 : static void
1558 1 : nvmf_ns_hot_remove(void *remove_ctx)
1559 : {
1560 1 : struct spdk_nvmf_ns *ns = remove_ctx;
1561 : struct subsystem_ns_change_ctx *ns_ctx;
1562 : int rc;
1563 :
1564 : /* We have to allocate a new context because this op
1565 : * is asynchronous and we could lose the ns in the middle.
1566 : */
1567 1 : ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
1568 1 : if (!ns_ctx) {
1569 0 : SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n");
1570 0 : return;
1571 : }
1572 :
1573 1 : ns_ctx->subsystem = ns->subsystem;
1574 1 : ns_ctx->nsid = ns->opts.nsid;
1575 1 : ns_ctx->cb_fn = _nvmf_ns_hot_remove;
1576 :
1577 1 : rc = spdk_nvmf_subsystem_pause(ns->subsystem, ns_ctx->nsid, _nvmf_ns_hot_remove, ns_ctx);
1578 1 : if (rc) {
1579 0 : if (rc == -EBUSY) {
1580 : /* Try again, this is not a permanent situation. */
1581 0 : spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx);
1582 : } else {
1583 0 : SPDK_ERRLOG("Unable to pause subsystem to process namespace removal!\n");
1584 0 : free(ns_ctx);
1585 : }
1586 : }
1587 : }
1588 :
1589 : static void
1590 1 : _nvmf_ns_resize(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
1591 : {
1592 1 : struct subsystem_ns_change_ctx *ctx = cb_arg;
1593 :
1594 1 : nvmf_subsystem_ns_changed(subsystem, ctx->nsid);
1595 1 : if (spdk_nvmf_subsystem_resume(subsystem, NULL, NULL) != 0) {
1596 0 : SPDK_ERRLOG("Failed to resume NVME-oF subsystem with id: %u\n", subsystem->id);
1597 : }
1598 :
1599 1 : free(ctx);
1600 1 : }
1601 :
1602 : static void
1603 1 : nvmf_ns_resize(void *event_ctx)
1604 : {
1605 1 : struct spdk_nvmf_ns *ns = event_ctx;
1606 : struct subsystem_ns_change_ctx *ns_ctx;
1607 : int rc;
1608 :
1609 : /* We have to allocate a new context because this op
1610 : * is asynchronous and we could lose the ns in the middle.
1611 : */
1612 1 : ns_ctx = calloc(1, sizeof(struct subsystem_ns_change_ctx));
1613 1 : if (!ns_ctx) {
1614 0 : SPDK_ERRLOG("Unable to allocate context to process namespace removal!\n");
1615 0 : return;
1616 : }
1617 :
1618 1 : ns_ctx->subsystem = ns->subsystem;
1619 1 : ns_ctx->nsid = ns->opts.nsid;
1620 1 : ns_ctx->cb_fn = _nvmf_ns_resize;
1621 :
1622 : /* Specify 0 for the nsid here, because we do not need to pause the namespace.
1623 : * Namespaces can only be resized bigger, so there is no need to quiesce I/O.
1624 : */
1625 1 : rc = spdk_nvmf_subsystem_pause(ns->subsystem, 0, _nvmf_ns_resize, ns_ctx);
1626 1 : if (rc) {
1627 0 : if (rc == -EBUSY) {
1628 : /* Try again, this is not a permanent situation. */
1629 0 : spdk_thread_send_msg(spdk_get_thread(), nvmf_ns_change_msg, ns_ctx);
1630 : } else {
1631 0 : SPDK_ERRLOG("Unable to pause subsystem to process namespace resize!\n");
1632 0 : free(ns_ctx);
1633 : }
1634 : }
1635 : }
1636 :
1637 : static void
1638 2 : nvmf_ns_event(enum spdk_bdev_event_type type,
1639 : struct spdk_bdev *bdev,
1640 : void *event_ctx)
1641 : {
1642 2 : SPDK_DEBUGLOG(nvmf, "Bdev event: type %d, name %s, subsystem_id %d, ns_id %d\n",
1643 : type,
1644 : spdk_bdev_get_name(bdev),
1645 : ((struct spdk_nvmf_ns *)event_ctx)->subsystem->id,
1646 : ((struct spdk_nvmf_ns *)event_ctx)->nsid);
1647 :
1648 2 : switch (type) {
1649 1 : case SPDK_BDEV_EVENT_REMOVE:
1650 1 : nvmf_ns_hot_remove(event_ctx);
1651 1 : break;
1652 1 : case SPDK_BDEV_EVENT_RESIZE:
1653 1 : nvmf_ns_resize(event_ctx);
1654 1 : break;
1655 0 : default:
1656 0 : SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
1657 0 : break;
1658 : }
1659 2 : }
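/*
 * Sketch of how the event callback above is wired up when a namespace bdev
 * is opened, assuming the same pattern spdk_nvmf_subsystem_add_ns_ext() uses
 * further below; "bdev_name" is illustrative. nvmf_ns_event() then receives
 * the namespace pointer as event_ctx on REMOVE/RESIZE events.
 */
static int
open_ns_bdev_example(struct spdk_nvmf_ns *ns, const char *bdev_name)
{
	/* write == true: the target needs write access to the bdev. */
	return spdk_bdev_open_ext(bdev_name, true, nvmf_ns_event, ns, &ns->desc);
}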
1660 :
1661 : void
1662 9 : spdk_nvmf_ns_opts_get_defaults(struct spdk_nvmf_ns_opts *opts, size_t opts_size)
1663 : {
1664 9 : if (!opts) {
1665 0 : SPDK_ERRLOG("opts should not be NULL.\n");
1666 0 : return;
1667 : }
1668 :
1669 9 : if (!opts_size) {
1670 0 : SPDK_ERRLOG("opts_size should not be zero.\n");
1671 0 : return;
1672 : }
1673 :
1674 9 : memset(opts, 0, opts_size);
1675 9 : opts->opts_size = opts_size;
1676 :
1677 : #define FIELD_OK(field) \
1678 : offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= opts_size
1679 :
1680 : #define SET_FIELD(field, value) \
1681 : if (FIELD_OK(field)) { \
1682 : opts->field = value; \
1683 : } \
1684 :
1685 : /* All current fields are set to 0 by default. */
1686 9 : SET_FIELD(nsid, 0);
1687 9 : if (FIELD_OK(nguid)) {
1688 9 : memset(opts->nguid, 0, sizeof(opts->nguid));
1689 : }
1690 9 : if (FIELD_OK(eui64)) {
1691 9 : memset(opts->eui64, 0, sizeof(opts->eui64));
1692 : }
1693 9 : if (FIELD_OK(uuid)) {
1694 9 : spdk_uuid_set_null(&opts->uuid);
1695 : }
1696 9 : SET_FIELD(anagrpid, 0);
1697 :
1698 : #undef FIELD_OK
1699 : #undef SET_FIELD
1700 : }
1701 :
1702 : static void
1703 4 : nvmf_ns_opts_copy(struct spdk_nvmf_ns_opts *opts,
1704 : const struct spdk_nvmf_ns_opts *user_opts,
1705 : size_t opts_size)
1706 : {
1707 : #define FIELD_OK(field) \
1708 : offsetof(struct spdk_nvmf_ns_opts, field) + sizeof(opts->field) <= user_opts->opts_size
1709 :
1710 : #define SET_FIELD(field) \
1711 : if (FIELD_OK(field)) { \
1712 : opts->field = user_opts->field; \
1713 : } \
1714 :
1715 4 : SET_FIELD(nsid);
1716 4 : if (FIELD_OK(nguid)) {
1717 4 : memcpy(opts->nguid, user_opts->nguid, sizeof(opts->nguid));
1718 : }
1719 4 : if (FIELD_OK(eui64)) {
1720 4 : memcpy(opts->eui64, user_opts->eui64, sizeof(opts->eui64));
1721 : }
1722 4 : if (FIELD_OK(uuid)) {
1723 4 : spdk_uuid_copy(&opts->uuid, &user_opts->uuid);
1724 : }
1725 4 : SET_FIELD(anagrpid);
1726 :
1727 4 : opts->opts_size = user_opts->opts_size;
1728 :
1729 : 	/* Do not remove this assert. If a new field is added, update the expected size
1730 : 	 * here and add a corresponding SET_FIELD statement above.
1731 : */
1732 : SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_ns_opts) == 64, "Incorrect size");
1733 :
1734 : #undef FIELD_OK
1735 : #undef SET_FIELD
1736 4 : }
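/*
 * Example (illustrative sketch, not part of this file): the FIELD_OK/SET_FIELD
 * pattern above keys every field access off the caller-provided opts_size, so a
 * caller compiled against an older, smaller struct spdk_nvmf_ns_opts keeps
 * working. The helper name below is hypothetical; it only shows how a caller is
 * expected to size and fill the structure.
 */
static void
example_fill_ns_opts(struct spdk_nvmf_ns_opts *opts)
{
	/* Always pass sizeof(*opts) so the library knows which fields the caller has. */
	spdk_nvmf_ns_opts_get_defaults(opts, sizeof(*opts));
	opts->nsid = 1;		/* request NSID 1 explicitly */
	opts->anagrpid = 1;	/* place the namespace in ANA group 1 */
}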
1737 :
1738 : /* Dummy bdev module used to claim bdevs. */
1739 : static struct spdk_bdev_module ns_bdev_module = {
1740 : .name = "NVMe-oF Target",
1741 : };
1742 :
1743 : static int nvmf_ns_reservation_update(const struct spdk_nvmf_ns *ns,
1744 : const struct spdk_nvmf_reservation_info *info);
1745 : static int nvmf_ns_reservation_load(const struct spdk_nvmf_ns *ns,
1746 : struct spdk_nvmf_reservation_info *info);
1747 : static int nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns,
1748 : struct spdk_nvmf_reservation_info *info);
1749 :
1750 : uint32_t
1751 5 : spdk_nvmf_subsystem_add_ns_ext(struct spdk_nvmf_subsystem *subsystem, const char *bdev_name,
1752 : const struct spdk_nvmf_ns_opts *user_opts, size_t opts_size,
1753 : const char *ptpl_file)
1754 : {
1755 : struct spdk_nvmf_transport *transport;
1756 5 : struct spdk_nvmf_ns_opts opts;
1757 : struct spdk_nvmf_ns *ns;
1758 5 : struct spdk_nvmf_reservation_info info = {0};
1759 : int rc;
1760 : bool zone_append_supported;
1761 : uint64_t max_zone_append_size_kib;
1762 :
1763 5 : if (!(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
1764 0 : subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED)) {
1765 0 : return 0;
1766 : }
1767 :
1768 5 : spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts));
1769 5 : if (user_opts) {
1770 4 : nvmf_ns_opts_copy(&opts, user_opts, opts_size);
1771 : }
1772 :
1773 5 : if (opts.nsid == SPDK_NVME_GLOBAL_NS_TAG) {
1774 1 : SPDK_ERRLOG("Invalid NSID %" PRIu32 "\n", opts.nsid);
1775 1 : return 0;
1776 : }
1777 :
1778 4 : if (opts.nsid == 0) {
1779 : /*
1780 : * NSID not specified - find a free index.
1781 : *
1782 : * If no free slots are found, opts.nsid will be subsystem->max_nsid + 1, which will
1783 : 		 * be rejected by the NSID range check below.
1784 : */
1785 2 : for (opts.nsid = 1; opts.nsid <= subsystem->max_nsid; opts.nsid++) {
1786 2 : if (_nvmf_subsystem_get_ns(subsystem, opts.nsid) == NULL) {
1787 2 : break;
1788 : }
1789 : }
1790 : }
1791 :
1792 4 : if (_nvmf_subsystem_get_ns(subsystem, opts.nsid)) {
1793 1 : SPDK_ERRLOG("Requested NSID %" PRIu32 " already in use\n", opts.nsid);
1794 1 : return 0;
1795 : }
1796 :
1797 3 : if (opts.nsid > subsystem->max_nsid) {
1798 0 : SPDK_ERRLOG("NSID greater than maximum not allowed\n");
1799 0 : return 0;
1800 : }
1801 :
1802 3 : if (opts.anagrpid == 0) {
1803 3 : opts.anagrpid = opts.nsid;
1804 : }
1805 :
1806 3 : if (opts.anagrpid > subsystem->max_nsid) {
1807 0 : SPDK_ERRLOG("ANAGRPID greater than maximum NSID not allowed\n");
1808 0 : return 0;
1809 : }
1810 :
1811 3 : ns = calloc(1, sizeof(*ns));
1812 3 : if (ns == NULL) {
1813 0 : SPDK_ERRLOG("Namespace allocation failed\n");
1814 0 : return 0;
1815 : }
1816 :
1817 3 : rc = spdk_bdev_open_ext(bdev_name, true, nvmf_ns_event, ns, &ns->desc);
1818 3 : if (rc != 0) {
1819 0 : SPDK_ERRLOG("Subsystem %s: bdev %s cannot be opened, error=%d\n",
1820 : subsystem->subnqn, bdev_name, rc);
1821 0 : free(ns);
1822 0 : return 0;
1823 : }
1824 :
1825 3 : ns->bdev = spdk_bdev_desc_get_bdev(ns->desc);
1826 :
1827 3 : if (spdk_bdev_get_md_size(ns->bdev) != 0) {
1828 0 : if (!spdk_bdev_is_md_interleaved(ns->bdev)) {
1829 0 : SPDK_ERRLOG("Can't attach bdev with separate metadata.\n");
1830 0 : spdk_bdev_close(ns->desc);
1831 0 : free(ns);
1832 0 : return 0;
1833 : }
1834 :
1835 0 : if (spdk_bdev_get_md_size(ns->bdev) > SPDK_BDEV_MAX_INTERLEAVED_MD_SIZE) {
1836 0 : SPDK_ERRLOG("Maximum supported interleaved md size %u, current md size %u\n",
1837 : SPDK_BDEV_MAX_INTERLEAVED_MD_SIZE, spdk_bdev_get_md_size(ns->bdev));
1838 0 : spdk_bdev_close(ns->desc);
1839 0 : free(ns);
1840 0 : return 0;
1841 : }
1842 : }
1843 :
1844 3 : rc = spdk_bdev_module_claim_bdev(ns->bdev, ns->desc, &ns_bdev_module);
1845 3 : if (rc != 0) {
1846 0 : spdk_bdev_close(ns->desc);
1847 0 : free(ns);
1848 0 : return 0;
1849 : }
1850 :
1851 : /* Cache the zcopy capability of the bdev device */
1852 3 : ns->zcopy = spdk_bdev_io_type_supported(ns->bdev, SPDK_BDEV_IO_TYPE_ZCOPY);
1853 :
1854 3 : if (spdk_uuid_is_null(&opts.uuid)) {
1855 3 : opts.uuid = *spdk_bdev_get_uuid(ns->bdev);
1856 : }
1857 :
1858 : 	/* if the nguid descriptor is supported by the bdev module (nvme), then uuid == nguid */
1859 3 : if (spdk_mem_all_zero(opts.nguid, sizeof(opts.nguid))) {
1860 : SPDK_STATIC_ASSERT(sizeof(opts.nguid) == sizeof(opts.uuid), "size mismatch");
1861 3 : memcpy(opts.nguid, spdk_bdev_get_uuid(ns->bdev), sizeof(opts.nguid));
1862 : }
1863 :
1864 3 : if (spdk_bdev_is_zoned(ns->bdev)) {
1865 0 : SPDK_DEBUGLOG(nvmf, "The added namespace is backed by a zoned block device.\n");
1866 0 : ns->csi = SPDK_NVME_CSI_ZNS;
1867 :
1868 0 : zone_append_supported = spdk_bdev_io_type_supported(ns->bdev,
1869 : SPDK_BDEV_IO_TYPE_ZONE_APPEND);
1870 0 : max_zone_append_size_kib = spdk_bdev_get_max_zone_append_size(
1871 0 : ns->bdev) * spdk_bdev_get_block_size(ns->bdev);
1872 :
1873 0 : if (_nvmf_subsystem_get_first_zoned_ns(subsystem) != NULL &&
1874 0 : (subsystem->zone_append_supported != zone_append_supported ||
1875 0 : subsystem->max_zone_append_size_kib != max_zone_append_size_kib)) {
1876 0 : SPDK_ERRLOG("Namespaces with different zone append support or different zone append size are not allowed.\n");
1877 0 : goto err;
1878 : }
1879 :
1880 0 : subsystem->zone_append_supported = zone_append_supported;
1881 0 : subsystem->max_zone_append_size_kib = max_zone_append_size_kib;
1882 : }
1883 :
1884 3 : ns->opts = opts;
1885 3 : ns->subsystem = subsystem;
1886 3 : subsystem->ns[opts.nsid - 1] = ns;
1887 3 : ns->nsid = opts.nsid;
1888 3 : ns->anagrpid = opts.anagrpid;
1889 3 : subsystem->ana_group[ns->anagrpid - 1]++;
1890 3 : TAILQ_INIT(&ns->registrants);
1891 3 : if (ptpl_file) {
1892 0 : ns->ptpl_file = strdup(ptpl_file);
1893 0 : if (!ns->ptpl_file) {
1894 0 : SPDK_ERRLOG("Namespace ns->ptpl_file allocation failed\n");
1895 0 : goto err;
1896 : }
1897 : }
1898 :
1899 3 : if (nvmf_ns_is_ptpl_capable(ns)) {
1900 1 : rc = nvmf_ns_reservation_load(ns, &info);
1901 1 : if (rc) {
1902 0 : SPDK_ERRLOG("Subsystem load reservation failed\n");
1903 0 : goto err;
1904 : }
1905 :
1906 1 : rc = nvmf_ns_reservation_restore(ns, &info);
1907 1 : if (rc) {
1908 0 : SPDK_ERRLOG("Subsystem restore reservation failed\n");
1909 0 : goto err;
1910 : }
1911 : }
1912 :
1913 3 : for (transport = spdk_nvmf_transport_get_first(subsystem->tgt); transport;
1914 0 : transport = spdk_nvmf_transport_get_next(transport)) {
1915 0 : if (transport->ops->subsystem_add_ns) {
1916 0 : rc = transport->ops->subsystem_add_ns(transport, subsystem, ns);
1917 0 : if (rc) {
1918 0 : SPDK_ERRLOG("Namespace attachment is not allowed by %s transport\n", transport->ops->name);
1919 0 : nvmf_ns_reservation_clear_all_registrants(ns);
1920 0 : goto err;
1921 : }
1922 : }
1923 : }
1924 :
1925 3 : SPDK_DEBUGLOG(nvmf, "Subsystem %s: bdev %s assigned nsid %" PRIu32 "\n",
1926 : spdk_nvmf_subsystem_get_nqn(subsystem),
1927 : bdev_name,
1928 : opts.nsid);
1929 :
1930 3 : nvmf_subsystem_ns_changed(subsystem, opts.nsid);
1931 :
1932 : SPDK_DTRACE_PROBE2(nvmf_subsystem_add_ns, subsystem->subnqn, ns->nsid);
1933 :
1934 3 : return opts.nsid;
1935 0 : err:
1936 0 : subsystem->ns[opts.nsid - 1] = NULL;
1937 0 : spdk_bdev_module_release_bdev(ns->bdev);
1938 0 : spdk_bdev_close(ns->desc);
1939 0 : free(ns->ptpl_file);
1940 0 : free(ns);
1941 :
1942 0 : return 0;
1943 : }
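/*
 * Example (illustrative sketch, not part of this file): adding a bdev-backed
 * namespace to a subsystem that is inactive or paused, as required by the
 * state check above. "Malloc0" is a placeholder bdev name and the helper name
 * is hypothetical; a return value of 0 means failure, per the convention of
 * spdk_nvmf_subsystem_add_ns_ext().
 */
static int
example_add_ns(struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_ns_opts opts;
	uint32_t nsid;

	spdk_nvmf_ns_opts_get_defaults(&opts, sizeof(opts));
	opts.nsid = 0;	/* 0 lets the library pick the first free NSID */

	nsid = spdk_nvmf_subsystem_add_ns_ext(subsystem, "Malloc0", &opts, sizeof(opts),
					      NULL /* no ptpl file */);
	if (nsid == 0) {
		return -EINVAL;
	}

	return 0;
}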
1944 :
1945 : static uint32_t
1946 13 : nvmf_subsystem_get_next_allocated_nsid(struct spdk_nvmf_subsystem *subsystem,
1947 : uint32_t prev_nsid)
1948 : {
1949 : uint32_t nsid;
1950 :
1951 13 : if (prev_nsid >= subsystem->max_nsid) {
1952 1 : return 0;
1953 : }
1954 :
1955 396 : for (nsid = prev_nsid + 1; nsid <= subsystem->max_nsid; nsid++) {
1956 384 : if (subsystem->ns[nsid - 1]) {
1957 0 : return nsid;
1958 : }
1959 : }
1960 :
1961 12 : return 0;
1962 : }
1963 :
1964 : struct spdk_nvmf_ns *
1965 13 : spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
1966 : {
1967 : uint32_t first_nsid;
1968 :
1969 13 : first_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, 0);
1970 13 : return _nvmf_subsystem_get_ns(subsystem, first_nsid);
1971 : }
1972 :
1973 : struct spdk_nvmf_ns *
1974 0 : spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem,
1975 : struct spdk_nvmf_ns *prev_ns)
1976 : {
1977 : uint32_t next_nsid;
1978 :
1979 0 : next_nsid = nvmf_subsystem_get_next_allocated_nsid(subsystem, prev_ns->opts.nsid);
1980 0 : return _nvmf_subsystem_get_ns(subsystem, next_nsid);
1981 : }
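/*
 * Example (illustrative sketch, not part of this file): iterating over every
 * allocated namespace with the two accessors above. The helper name is
 * hypothetical.
 */
static uint32_t
example_count_ns(struct spdk_nvmf_subsystem *subsystem)
{
	struct spdk_nvmf_ns *ns;
	uint32_t count = 0;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		count++;
	}

	return count;
}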
1982 :
1983 : struct spdk_nvmf_ns *
1984 0 : spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
1985 : {
1986 0 : return _nvmf_subsystem_get_ns(subsystem, nsid);
1987 : }
1988 :
1989 : uint32_t
1990 0 : spdk_nvmf_ns_get_id(const struct spdk_nvmf_ns *ns)
1991 : {
1992 0 : return ns->opts.nsid;
1993 : }
1994 :
1995 : struct spdk_bdev *
1996 0 : spdk_nvmf_ns_get_bdev(struct spdk_nvmf_ns *ns)
1997 : {
1998 0 : return ns->bdev;
1999 : }
2000 :
2001 : void
2002 0 : spdk_nvmf_ns_get_opts(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_ns_opts *opts,
2003 : size_t opts_size)
2004 : {
2005 0 : memset(opts, 0, opts_size);
2006 0 : memcpy(opts, &ns->opts, spdk_min(sizeof(ns->opts), opts_size));
2007 0 : }
2008 :
2009 : const char *
2010 0 : spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
2011 : {
2012 0 : return subsystem->sn;
2013 : }
2014 :
2015 : int
2016 4 : spdk_nvmf_subsystem_set_sn(struct spdk_nvmf_subsystem *subsystem, const char *sn)
2017 : {
2018 : size_t len, max_len;
2019 :
2020 4 : max_len = sizeof(subsystem->sn) - 1;
2021 4 : len = strlen(sn);
2022 4 : if (len > max_len) {
2023 1 : SPDK_DEBUGLOG(nvmf, "Invalid sn \"%s\": length %zu > max %zu\n",
2024 : sn, len, max_len);
2025 1 : return -1;
2026 : }
2027 :
2028 3 : if (!nvmf_valid_ascii_string(sn, len)) {
2029 1 : SPDK_DEBUGLOG(nvmf, "Non-ASCII sn\n");
2030 1 : SPDK_LOGDUMP(nvmf, "sn", sn, len);
2031 1 : return -1;
2032 : }
2033 :
2034 2 : snprintf(subsystem->sn, sizeof(subsystem->sn), "%s", sn);
2035 :
2036 2 : return 0;
2037 : }
2038 :
2039 : const char *
2040 0 : spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
2041 : {
2042 0 : return subsystem->mn;
2043 : }
2044 :
2045 : int
2046 0 : spdk_nvmf_subsystem_set_mn(struct spdk_nvmf_subsystem *subsystem, const char *mn)
2047 : {
2048 : size_t len, max_len;
2049 :
2050 0 : if (mn == NULL) {
2051 0 : mn = MODEL_NUMBER_DEFAULT;
2052 : }
2053 0 : max_len = sizeof(subsystem->mn) - 1;
2054 0 : len = strlen(mn);
2055 0 : if (len > max_len) {
2056 0 : SPDK_DEBUGLOG(nvmf, "Invalid mn \"%s\": length %zu > max %zu\n",
2057 : mn, len, max_len);
2058 0 : return -1;
2059 : }
2060 :
2061 0 : if (!nvmf_valid_ascii_string(mn, len)) {
2062 0 : SPDK_DEBUGLOG(nvmf, "Non-ASCII mn\n");
2063 0 : SPDK_LOGDUMP(nvmf, "mn", mn, len);
2064 0 : return -1;
2065 : }
2066 :
2067 0 : snprintf(subsystem->mn, sizeof(subsystem->mn), "%s", mn);
2068 :
2069 0 : return 0;
2070 : }
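/*
 * Example (illustrative sketch, not part of this file): setting the identity
 * strings reported by Identify Controller. The serial number must be printable
 * ASCII and fit the subsystem's sn buffer (the NVMe spec defines a 20-byte SN
 * field); passing NULL for the model number falls back to MODEL_NUMBER_DEFAULT.
 * The helper name and serial value are placeholders.
 */
static void
example_set_identity(struct spdk_nvmf_subsystem *subsystem)
{
	if (spdk_nvmf_subsystem_set_sn(subsystem, "SPDK00000000000001") != 0) {
		SPDK_ERRLOG("Invalid serial number\n");
	}

	if (spdk_nvmf_subsystem_set_mn(subsystem, NULL) != 0) {
		SPDK_ERRLOG("Invalid model number\n");
	}
}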
2071 :
2072 : const char *
2073 0 : spdk_nvmf_subsystem_get_nqn(const struct spdk_nvmf_subsystem *subsystem)
2074 : {
2075 0 : return subsystem->subnqn;
2076 : }
2077 :
2078 : /* We have to use the typedef in the function declaration to appease astyle. */
2079 : typedef enum spdk_nvmf_subtype spdk_nvmf_subtype_t;
2080 :
2081 : spdk_nvmf_subtype_t
2082 0 : spdk_nvmf_subsystem_get_type(struct spdk_nvmf_subsystem *subsystem)
2083 : {
2084 0 : return subsystem->subtype;
2085 : }
2086 :
2087 : uint32_t
2088 0 : spdk_nvmf_subsystem_get_max_nsid(struct spdk_nvmf_subsystem *subsystem)
2089 : {
2090 0 : return subsystem->max_nsid;
2091 : }
2092 :
2093 : int
2094 0 : nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem,
2095 : uint16_t min_cntlid, uint16_t max_cntlid)
2096 : {
2097 0 : if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
2098 0 : return -EAGAIN;
2099 : }
2100 :
2101 0 : if (min_cntlid > max_cntlid) {
2102 0 : return -EINVAL;
2103 : }
2104 : /* The spec reserves cntlid values in the range FFF0h to FFFFh. */
2105 0 : if (min_cntlid < NVMF_MIN_CNTLID || min_cntlid > NVMF_MAX_CNTLID ||
2106 0 : max_cntlid < NVMF_MIN_CNTLID || max_cntlid > NVMF_MAX_CNTLID) {
2107 0 : return -EINVAL;
2108 : }
2109 0 : subsystem->min_cntlid = min_cntlid;
2110 0 : subsystem->max_cntlid = max_cntlid;
2111 0 : if (subsystem->next_cntlid < min_cntlid || subsystem->next_cntlid > max_cntlid - 1) {
2112 0 : subsystem->next_cntlid = min_cntlid - 1;
2113 : }
2114 :
2115 0 : return 0;
2116 : }
2117 :
2118 : static uint16_t
2119 1 : nvmf_subsystem_gen_cntlid(struct spdk_nvmf_subsystem *subsystem)
2120 : {
2121 : int count;
2122 :
2123 : /*
2124 : * In the worst case, we might have to try all CNTLID values between min_cntlid and max_cntlid
2125 : * before we find one that is unused (or find that all values are in use).
2126 : */
2127 1 : for (count = 0; count < subsystem->max_cntlid - subsystem->min_cntlid + 1; count++) {
2128 1 : subsystem->next_cntlid++;
2129 1 : if (subsystem->next_cntlid > subsystem->max_cntlid) {
2130 0 : subsystem->next_cntlid = subsystem->min_cntlid;
2131 : }
2132 :
2133 : /* Check if a controller with this cntlid currently exists. */
2134 1 : if (nvmf_subsystem_get_ctrlr(subsystem, subsystem->next_cntlid) == NULL) {
2135 : /* Found unused cntlid */
2136 1 : return subsystem->next_cntlid;
2137 : }
2138 : }
2139 :
2140 : /* All valid cntlid values are in use. */
2141 0 : return 0xFFFF;
2142 : }
2143 :
2144 : int
2145 1 : nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr)
2146 : {
2147 :
2148 1 : if (ctrlr->dynamic_ctrlr) {
2149 1 : ctrlr->cntlid = nvmf_subsystem_gen_cntlid(subsystem);
2150 1 : if (ctrlr->cntlid == 0xFFFF) {
2151 : /* Unable to get a cntlid */
2152 0 : SPDK_ERRLOG("Reached max simultaneous ctrlrs\n");
2153 0 : return -EBUSY;
2154 : }
2155 0 : } else if (nvmf_subsystem_get_ctrlr(subsystem, ctrlr->cntlid) != NULL) {
2156 0 : 		SPDK_ERRLOG("Ctrlr with cntlid %u already exists\n", ctrlr->cntlid);
2157 0 : return -EEXIST;
2158 : }
2159 :
2160 1 : TAILQ_INSERT_TAIL(&subsystem->ctrlrs, ctrlr, link);
2161 :
2162 : SPDK_DTRACE_PROBE3(nvmf_subsystem_add_ctrlr, subsystem->subnqn, ctrlr, ctrlr->hostnqn);
2163 :
2164 1 : return 0;
2165 : }
2166 :
2167 : void
2168 1 : nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
2169 : struct spdk_nvmf_ctrlr *ctrlr)
2170 : {
2171 : SPDK_DTRACE_PROBE3(nvmf_subsystem_remove_ctrlr, subsystem->subnqn, ctrlr, ctrlr->hostnqn);
2172 :
2173 1 : assert(spdk_get_thread() == subsystem->thread);
2174 1 : assert(subsystem == ctrlr->subsys);
2175 1 : SPDK_DEBUGLOG(nvmf, "remove ctrlr %p id 0x%x from subsys %p %s\n", ctrlr, ctrlr->cntlid, subsystem,
2176 : subsystem->subnqn);
2177 1 : TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link);
2178 1 : }
2179 :
2180 : struct spdk_nvmf_ctrlr *
2181 2 : nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid)
2182 : {
2183 : struct spdk_nvmf_ctrlr *ctrlr;
2184 :
2185 2 : TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
2186 1 : if (ctrlr->cntlid == cntlid) {
2187 1 : return ctrlr;
2188 : }
2189 : }
2190 :
2191 1 : return NULL;
2192 : }
2193 :
2194 : uint32_t
2195 0 : spdk_nvmf_subsystem_get_max_namespaces(const struct spdk_nvmf_subsystem *subsystem)
2196 : {
2197 0 : return subsystem->max_nsid;
2198 : }
2199 :
2200 : uint16_t
2201 0 : spdk_nvmf_subsystem_get_min_cntlid(const struct spdk_nvmf_subsystem *subsystem)
2202 : {
2203 0 : return subsystem->min_cntlid;
2204 : }
2205 :
2206 : uint16_t
2207 0 : spdk_nvmf_subsystem_get_max_cntlid(const struct spdk_nvmf_subsystem *subsystem)
2208 : {
2209 0 : return subsystem->max_cntlid;
2210 : }
2211 :
2212 : struct _nvmf_ns_registrant {
2213 : uint64_t rkey;
2214 : char *host_uuid;
2215 : };
2216 :
2217 : struct _nvmf_ns_registrants {
2218 : size_t num_regs;
2219 : struct _nvmf_ns_registrant reg[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2220 : };
2221 :
2222 : struct _nvmf_ns_reservation {
2223 : bool ptpl_activated;
2224 : enum spdk_nvme_reservation_type rtype;
2225 : uint64_t crkey;
2226 : char *bdev_uuid;
2227 : char *holder_uuid;
2228 : struct _nvmf_ns_registrants regs;
2229 : };
2230 :
2231 : static const struct spdk_json_object_decoder nvmf_ns_pr_reg_decoders[] = {
2232 : {"rkey", offsetof(struct _nvmf_ns_registrant, rkey), spdk_json_decode_uint64},
2233 : {"host_uuid", offsetof(struct _nvmf_ns_registrant, host_uuid), spdk_json_decode_string},
2234 : };
2235 :
2236 : static int
2237 4 : nvmf_decode_ns_pr_reg(const struct spdk_json_val *val, void *out)
2238 : {
2239 4 : struct _nvmf_ns_registrant *reg = out;
2240 :
2241 4 : return spdk_json_decode_object(val, nvmf_ns_pr_reg_decoders,
2242 : SPDK_COUNTOF(nvmf_ns_pr_reg_decoders), reg);
2243 : }
2244 :
2245 : static int
2246 4 : nvmf_decode_ns_pr_regs(const struct spdk_json_val *val, void *out)
2247 : {
2248 4 : struct _nvmf_ns_registrants *regs = out;
2249 :
2250 4 : return spdk_json_decode_array(val, nvmf_decode_ns_pr_reg, regs->reg,
2251 : 				      SPDK_NVMF_MAX_NUM_REGISTRANTS, &regs->num_regs,
2252 : sizeof(struct _nvmf_ns_registrant));
2253 : }
2254 :
2255 : static const struct spdk_json_object_decoder nvmf_ns_pr_decoders[] = {
2256 : {"ptpl", offsetof(struct _nvmf_ns_reservation, ptpl_activated), spdk_json_decode_bool, true},
2257 : {"rtype", offsetof(struct _nvmf_ns_reservation, rtype), spdk_json_decode_uint32, true},
2258 : {"crkey", offsetof(struct _nvmf_ns_reservation, crkey), spdk_json_decode_uint64, true},
2259 : {"bdev_uuid", offsetof(struct _nvmf_ns_reservation, bdev_uuid), spdk_json_decode_string},
2260 : {"holder_uuid", offsetof(struct _nvmf_ns_reservation, holder_uuid), spdk_json_decode_string, true},
2261 : {"registrants", offsetof(struct _nvmf_ns_reservation, regs), nvmf_decode_ns_pr_regs},
2262 : };
2263 :
2264 : static int
2265 5 : nvmf_ns_reservation_load_json(const struct spdk_nvmf_ns *ns,
2266 : struct spdk_nvmf_reservation_info *info)
2267 : {
2268 : FILE *fd;
2269 5 : size_t json_size;
2270 : ssize_t values_cnt, rc;
2271 5 : void *json = NULL, *end;
2272 5 : struct spdk_json_val *values = NULL;
2273 5 : struct _nvmf_ns_reservation res = {};
2274 5 : const char *file = ns->ptpl_file;
2275 : uint32_t i;
2276 :
2277 5 : fd = fopen(file, "r");
2278 : /* It's not an error if the file does not exist */
2279 5 : if (!fd) {
2280 0 : SPDK_NOTICELOG("File %s does not exist\n", file);
2281 0 : return 0;
2282 : }
2283 :
2284 : /* Load all persist file contents into a local buffer */
2285 5 : json = spdk_posix_file_load(fd, &json_size);
2286 5 : fclose(fd);
2287 5 : if (!json) {
2288 0 : SPDK_ERRLOG("Load persit file %s failed\n", file);
2289 0 : 		SPDK_ERRLOG("Failed to load persist file %s\n", file);
2290 : }
2291 :
2292 5 : rc = spdk_json_parse(json, json_size, NULL, 0, &end, 0);
2293 5 : if (rc < 0) {
2294 1 : SPDK_NOTICELOG("Parsing JSON configuration failed (%zd)\n", rc);
2295 1 : goto exit;
2296 : }
2297 :
2298 4 : values_cnt = rc;
2299 4 : values = calloc(values_cnt, sizeof(struct spdk_json_val));
2300 4 : if (values == NULL) {
2301 0 : goto exit;
2302 : }
2303 :
2304 4 : rc = spdk_json_parse(json, json_size, values, values_cnt, &end, 0);
2305 4 : if (rc != values_cnt) {
2306 0 : SPDK_ERRLOG("Parsing JSON configuration failed (%zd)\n", rc);
2307 0 : goto exit;
2308 : }
2309 :
2310 : /* Decode json */
2311 4 : if (spdk_json_decode_object(values, nvmf_ns_pr_decoders,
2312 : SPDK_COUNTOF(nvmf_ns_pr_decoders),
2313 : &res)) {
2314 0 : SPDK_ERRLOG("Invalid objects in the persist file %s\n", file);
2315 0 : rc = -EINVAL;
2316 0 : goto exit;
2317 : }
2318 :
2319 4 : if (res.regs.num_regs > SPDK_NVMF_MAX_NUM_REGISTRANTS) {
2320 0 : SPDK_ERRLOG("Can only support up to %u registrants\n", SPDK_NVMF_MAX_NUM_REGISTRANTS);
2321 0 : rc = -ERANGE;
2322 0 : goto exit;
2323 : }
2324 :
2325 4 : rc = 0;
2326 4 : info->ptpl_activated = res.ptpl_activated;
2327 4 : info->rtype = res.rtype;
2328 4 : info->crkey = res.crkey;
2329 4 : snprintf(info->bdev_uuid, sizeof(info->bdev_uuid), "%s", res.bdev_uuid);
2330 4 : snprintf(info->holder_uuid, sizeof(info->holder_uuid), "%s", res.holder_uuid);
2331 4 : info->num_regs = res.regs.num_regs;
2332 8 : for (i = 0; i < res.regs.num_regs; i++) {
2333 4 : info->registrants[i].rkey = res.regs.reg[i].rkey;
2334 4 : snprintf(info->registrants[i].host_uuid, sizeof(info->registrants[i].host_uuid), "%s",
2335 : res.regs.reg[i].host_uuid);
2336 : }
2337 :
2338 5 : exit:
2339 5 : free(json);
2340 5 : free(values);
2341 5 : free(res.bdev_uuid);
2342 5 : free(res.holder_uuid);
2343 9 : for (i = 0; i < res.regs.num_regs; i++) {
2344 4 : free(res.regs.reg[i].host_uuid);
2345 : }
2346 :
2347 5 : return rc;
2348 : }
2349 :
2350 : static bool nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns);
2351 :
2352 : static int
2353 5 : nvmf_ns_reservation_restore(struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info)
2354 : {
2355 : uint32_t i;
2356 5 : struct spdk_nvmf_registrant *reg, *holder = NULL;
2357 5 : struct spdk_uuid bdev_uuid, holder_uuid;
2358 5 : bool rkey_flag = false;
2359 :
2360 5 : SPDK_DEBUGLOG(nvmf, "NSID %u, PTPL %u, Number of registrants %u\n",
2361 : ns->nsid, info->ptpl_activated, info->num_regs);
2362 :
2363 : /* it's not an error */
2364 5 : if (!info->ptpl_activated || !info->num_regs) {
2365 0 : return 0;
2366 : }
2367 :
2368 : 	/* Check whether info->crkey exists among info->registrants[i].rkey */
2369 14 : for (i = 0; i < info->num_regs; i++) {
2370 9 : if (info->crkey == info->registrants[i].rkey) {
2371 3 : rkey_flag = true;
2372 : }
2373 : }
2374 5 : if (!rkey_flag && info->crkey != 0) {
2375 1 : return -EINVAL;
2376 : }
2377 :
2378 4 : spdk_uuid_parse(&bdev_uuid, info->bdev_uuid);
2379 4 : if (spdk_uuid_compare(&bdev_uuid, spdk_bdev_get_uuid(ns->bdev))) {
2380 1 : 		SPDK_ERRLOG("Existing bdev UUID does not match the configuration file\n");
2381 1 : return -EINVAL;
2382 : }
2383 :
2384 3 : ns->crkey = info->crkey;
2385 3 : ns->rtype = info->rtype;
2386 3 : ns->ptpl_activated = info->ptpl_activated;
2387 3 : spdk_uuid_parse(&holder_uuid, info->holder_uuid);
2388 :
2389 3 : SPDK_DEBUGLOG(nvmf, "Bdev UUID %s\n", info->bdev_uuid);
2390 3 : if (info->rtype) {
2391 2 : SPDK_DEBUGLOG(nvmf, "Holder UUID %s, RTYPE %u, RKEY 0x%"PRIx64"\n",
2392 : info->holder_uuid, info->rtype, info->crkey);
2393 : }
2394 :
2395 8 : for (i = 0; i < info->num_regs; i++) {
2396 5 : reg = calloc(1, sizeof(*reg));
2397 5 : if (!reg) {
2398 0 : return -ENOMEM;
2399 : }
2400 5 : 		spdk_uuid_parse(&reg->hostid, info->registrants[i].host_uuid);
2401 5 : reg->rkey = info->registrants[i].rkey;
2402 5 : TAILQ_INSERT_TAIL(&ns->registrants, reg, link);
2403 5 : 		if (info->crkey != 0 && !spdk_uuid_compare(&holder_uuid, &reg->hostid)) {
2404 2 : holder = reg;
2405 : }
2406 5 : SPDK_DEBUGLOG(nvmf, "Registrant RKEY 0x%"PRIx64", Host UUID %s\n",
2407 : info->registrants[i].rkey, info->registrants[i].host_uuid);
2408 : }
2409 :
2410 3 : if (nvmf_ns_reservation_all_registrants_type(ns)) {
2411 1 : ns->holder = TAILQ_FIRST(&ns->registrants);
2412 : } else {
2413 2 : ns->holder = holder;
2414 : }
2415 :
2416 3 : return 0;
2417 : }
2418 :
2419 : static int
2420 5 : nvmf_ns_json_write_cb(void *cb_ctx, const void *data, size_t size)
2421 : {
2422 5 : char *file = cb_ctx;
2423 : size_t rc;
2424 : FILE *fd;
2425 :
2426 5 : fd = fopen(file, "w");
2427 5 : if (!fd) {
2428 0 : SPDK_ERRLOG("Can't open file %s for write\n", file);
2429 0 : return -ENOENT;
2430 : }
2431 5 : rc = fwrite(data, 1, size, fd);
2432 5 : fclose(fd);
2433 :
2434 5 : return rc == size ? 0 : -1;
2435 : }
2436 :
2437 : static int
2438 5 : nvmf_ns_reservation_update_json(const struct spdk_nvmf_ns *ns,
2439 : const struct spdk_nvmf_reservation_info *info)
2440 : {
2441 5 : const char *file = ns->ptpl_file;
2442 : struct spdk_json_write_ctx *w;
2443 : uint32_t i;
2444 5 : int rc = 0;
2445 :
2446 5 : w = spdk_json_write_begin(nvmf_ns_json_write_cb, (void *)file, 0);
2447 5 : if (w == NULL) {
2448 0 : return -ENOMEM;
2449 : }
2450 : /* clear the configuration file */
2451 5 : if (!info->ptpl_activated) {
2452 1 : goto exit;
2453 : }
2454 :
2455 4 : spdk_json_write_object_begin(w);
2456 4 : spdk_json_write_named_bool(w, "ptpl", info->ptpl_activated);
2457 4 : spdk_json_write_named_uint32(w, "rtype", info->rtype);
2458 4 : spdk_json_write_named_uint64(w, "crkey", info->crkey);
2459 4 : spdk_json_write_named_string(w, "bdev_uuid", info->bdev_uuid);
2460 4 : spdk_json_write_named_string(w, "holder_uuid", info->holder_uuid);
2461 :
2462 4 : spdk_json_write_named_array_begin(w, "registrants");
2463 8 : for (i = 0; i < info->num_regs; i++) {
2464 4 : spdk_json_write_object_begin(w);
2465 4 : spdk_json_write_named_uint64(w, "rkey", info->registrants[i].rkey);
2466 4 : spdk_json_write_named_string(w, "host_uuid", info->registrants[i].host_uuid);
2467 4 : spdk_json_write_object_end(w);
2468 : }
2469 4 : spdk_json_write_array_end(w);
2470 4 : spdk_json_write_object_end(w);
2471 :
2472 5 : exit:
2473 5 : rc = spdk_json_write_end(w);
2474 5 : return rc;
2475 : }
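/*
 * Example (illustrative): the persist-to-power-loss (ptpl) file written above
 * and parsed by nvmf_ns_reservation_load_json() has roughly the following
 * shape; the key and UUID values are placeholders.
 *
 * {
 *   "ptpl": true,
 *   "rtype": 1,
 *   "crkey": 1234,
 *   "bdev_uuid": "<bdev uuid>",
 *   "holder_uuid": "<holder host uuid>",
 *   "registrants": [
 *     { "rkey": 1234, "host_uuid": "<holder host uuid>" }
 *   ]
 * }
 */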
2476 :
2477 : static int
2478 7 : nvmf_ns_update_reservation_info(struct spdk_nvmf_ns *ns)
2479 : {
2480 7 : struct spdk_nvmf_reservation_info info;
2481 : struct spdk_nvmf_registrant *reg, *tmp;
2482 7 : uint32_t i = 0;
2483 :
2484 7 : assert(ns != NULL);
2485 :
2486 7 : if (!ns->bdev || !nvmf_ns_is_ptpl_capable(ns)) {
2487 0 : return 0;
2488 : }
2489 :
2490 7 : memset(&info, 0, sizeof(info));
2491 7 : spdk_uuid_fmt_lower(info.bdev_uuid, sizeof(info.bdev_uuid), spdk_bdev_get_uuid(ns->bdev));
2492 :
2493 7 : if (ns->rtype) {
2494 2 : info.rtype = ns->rtype;
2495 2 : info.crkey = ns->crkey;
2496 2 : if (!nvmf_ns_reservation_all_registrants_type(ns)) {
2497 2 : assert(ns->holder != NULL);
2498 2 : spdk_uuid_fmt_lower(info.holder_uuid, sizeof(info.holder_uuid), &ns->holder->hostid);
2499 : }
2500 : }
2501 :
2502 14 : TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2503 7 : spdk_uuid_fmt_lower(info.registrants[i].host_uuid, sizeof(info.registrants[i].host_uuid),
2504 7 : 				    &reg->hostid);
2505 7 : info.registrants[i++].rkey = reg->rkey;
2506 : }
2507 :
2508 7 : info.num_regs = i;
2509 7 : info.ptpl_activated = ns->ptpl_activated;
2510 :
2511 7 : return nvmf_ns_reservation_update(ns, &info);
2512 : }
2513 :
2514 : static struct spdk_nvmf_registrant *
2515 110 : nvmf_ns_reservation_get_registrant(struct spdk_nvmf_ns *ns,
2516 : struct spdk_uuid *uuid)
2517 : {
2518 : struct spdk_nvmf_registrant *reg, *tmp;
2519 :
2520 191 : TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2521 151 : 		if (!spdk_uuid_compare(&reg->hostid, uuid)) {
2522 70 : return reg;
2523 : }
2524 : }
2525 :
2526 40 : return NULL;
2527 : }
2528 :
2529 : /* Generate reservation notice log to registered HostID controllers */
2530 : static void
2531 10 : nvmf_subsystem_gen_ctrlr_notification(struct spdk_nvmf_subsystem *subsystem,
2532 : struct spdk_nvmf_ns *ns,
2533 : struct spdk_uuid *hostid_list,
2534 : uint32_t num_hostid,
2535 : enum spdk_nvme_reservation_notification_log_page_type type)
2536 : {
2537 : struct spdk_nvmf_ctrlr *ctrlr;
2538 : uint32_t i;
2539 :
2540 25 : for (i = 0; i < num_hostid; i++) {
2541 75 : TAILQ_FOREACH(ctrlr, &subsystem->ctrlrs, link) {
2542 60 : if (!spdk_uuid_compare(&ctrlr->hostid, &hostid_list[i])) {
2543 22 : nvmf_ctrlr_reservation_notice_log(ctrlr, ns, type);
2544 : }
2545 : }
2546 : }
2547 10 : }
2548 :
2549 : /* Get all registrants' hostid other than the controller who issued the command */
2550 : static uint32_t
2551 16 : nvmf_ns_reservation_get_all_other_hostid(struct spdk_nvmf_ns *ns,
2552 : struct spdk_uuid *hostid_list,
2553 : uint32_t max_num_hostid,
2554 : struct spdk_uuid *current_hostid)
2555 : {
2556 : struct spdk_nvmf_registrant *reg, *tmp;
2557 16 : uint32_t num_hostid = 0;
2558 :
2559 55 : TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2560 39 : 		if (spdk_uuid_compare(&reg->hostid, current_hostid)) {
2561 23 : if (num_hostid == max_num_hostid) {
2562 0 : assert(false);
2563 : return max_num_hostid;
2564 : }
2565 23 : hostid_list[num_hostid++] = reg->hostid;
2566 : }
2567 : }
2568 :
2569 16 : return num_hostid;
2570 : }
2571 :
2572 : /* Calculate the unregistered HostID list by comparing the registrant list
2573 :  * prior to executing the preempt command with the list remaining after
2574 :  * executing the command.
2575 : */
2576 : static uint32_t
2577 3 : nvmf_ns_reservation_get_unregistered_hostid(struct spdk_uuid *old_hostid_list,
2578 : uint32_t old_num_hostid,
2579 : struct spdk_uuid *remaining_hostid_list,
2580 : uint32_t remaining_num_hostid)
2581 : {
2582 3 : struct spdk_uuid temp_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2583 3 : uint32_t i, j, num_hostid = 0;
2584 : bool found;
2585 :
2586 3 : if (!remaining_num_hostid) {
2587 1 : return old_num_hostid;
2588 : }
2589 :
2590 6 : for (i = 0; i < old_num_hostid; i++) {
2591 4 : found = false;
2592 6 : for (j = 0; j < remaining_num_hostid; j++) {
2593 4 : if (!spdk_uuid_compare(&old_hostid_list[i], &remaining_hostid_list[j])) {
2594 2 : found = true;
2595 2 : break;
2596 : }
2597 : }
2598 4 : if (!found) {
2599 2 : spdk_uuid_copy(&temp_hostid_list[num_hostid++], &old_hostid_list[i]);
2600 : }
2601 : }
2602 :
2603 2 : if (num_hostid) {
2604 2 : memcpy(old_hostid_list, temp_hostid_list, sizeof(struct spdk_uuid) * num_hostid);
2605 : }
2606 :
2607 2 : return num_hostid;
2608 : }
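/*
 * Example (illustrative): if the hostid list before the preempt was {A, B, C}
 * and the remaining registrants afterwards are {B}, the function above returns
 * {A, C}, i.e. the hosts whose registrations were removed by the preempt.
 */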
2609 :
2610 : /* Return true if the current reservation type is "all registrants" */
2611 : static bool
2612 54 : nvmf_ns_reservation_all_registrants_type(struct spdk_nvmf_ns *ns)
2613 : {
2614 102 : return (ns->rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS ||
2615 48 : ns->rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
2616 : }
2617 :
2618 : /* Return true if the given registrant is the reservation holder */
2619 : static bool
2620 26 : nvmf_ns_reservation_registrant_is_holder(struct spdk_nvmf_ns *ns,
2621 : struct spdk_nvmf_registrant *reg)
2622 : {
2623 26 : if (!reg) {
2624 0 : return false;
2625 : }
2626 :
2627 26 : if (nvmf_ns_reservation_all_registrants_type(ns)) {
2628 2 : return true;
2629 : }
2630 :
2631 24 : return (ns->holder == reg);
2632 : }
2633 :
2634 : static int
2635 29 : nvmf_ns_reservation_add_registrant(struct spdk_nvmf_ns *ns,
2636 : struct spdk_nvmf_ctrlr *ctrlr,
2637 : uint64_t nrkey)
2638 : {
2639 : struct spdk_nvmf_registrant *reg;
2640 :
2641 29 : reg = calloc(1, sizeof(*reg));
2642 29 : if (!reg) {
2643 0 : return -ENOMEM;
2644 : }
2645 :
2646 29 : reg->rkey = nrkey;
2647 : /* set hostid for the registrant */
2648 29 : spdk_uuid_copy(®->hostid, &ctrlr->hostid);
2649 29 : 	spdk_uuid_copy(&reg->hostid, &ctrlr->hostid);
2650 29 : ns->gen++;
2651 :
2652 29 : return 0;
2653 : }
2654 :
2655 : static void
2656 11 : nvmf_ns_reservation_release_reservation(struct spdk_nvmf_ns *ns)
2657 : {
2658 11 : ns->rtype = 0;
2659 11 : ns->crkey = 0;
2660 11 : ns->holder = NULL;
2661 11 : }
2662 :
2663 : /* release the reservation if the last registrant was removed */
2664 : static void
2665 19 : nvmf_ns_reservation_check_release_on_remove_registrant(struct spdk_nvmf_ns *ns,
2666 : struct spdk_nvmf_registrant *reg)
2667 : {
2668 : struct spdk_nvmf_registrant *next_reg;
2669 :
2670 : /* no reservation holder */
2671 19 : if (!ns->holder) {
2672 7 : assert(ns->rtype == 0);
2673 7 : return;
2674 : }
2675 :
2676 12 : next_reg = TAILQ_FIRST(&ns->registrants);
2677 12 : if (next_reg && nvmf_ns_reservation_all_registrants_type(ns)) {
2678 : /* the next valid registrant is the new holder now */
2679 2 : ns->holder = next_reg;
2680 10 : } else if (nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
2681 : /* release the reservation */
2682 7 : nvmf_ns_reservation_release_reservation(ns);
2683 : }
2684 : }
2685 :
2686 : static void
2687 19 : nvmf_ns_reservation_remove_registrant(struct spdk_nvmf_ns *ns,
2688 : struct spdk_nvmf_registrant *reg)
2689 : {
2690 19 : TAILQ_REMOVE(&ns->registrants, reg, link);
2691 19 : nvmf_ns_reservation_check_release_on_remove_registrant(ns, reg);
2692 19 : free(reg);
2693 19 : ns->gen++;
2694 19 : return;
2695 : }
2696 :
2697 : static uint32_t
2698 0 : nvmf_ns_reservation_remove_registrants_by_key(struct spdk_nvmf_ns *ns,
2699 : uint64_t rkey)
2700 : {
2701 : struct spdk_nvmf_registrant *reg, *tmp;
2702 0 : uint32_t count = 0;
2703 :
2704 0 : TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
2705 0 : if (reg->rkey == rkey) {
2706 0 : nvmf_ns_reservation_remove_registrant(ns, reg);
2707 0 : count++;
2708 : }
2709 : }
2710 0 : return count;
2711 : }
2712 :
2713 : static uint32_t
2714 1 : nvmf_ns_reservation_remove_all_other_registrants(struct spdk_nvmf_ns *ns,
2715 : struct spdk_nvmf_registrant *reg)
2716 : {
2717 : struct spdk_nvmf_registrant *reg_tmp, *reg_tmp2;
2718 1 : uint32_t count = 0;
2719 :
2720 3 : TAILQ_FOREACH_SAFE(reg_tmp, &ns->registrants, link, reg_tmp2) {
2721 2 : if (reg_tmp != reg) {
2722 1 : nvmf_ns_reservation_remove_registrant(ns, reg_tmp);
2723 1 : count++;
2724 : }
2725 : }
2726 1 : return count;
2727 : }
2728 :
2729 : static uint32_t
2730 7 : nvmf_ns_reservation_clear_all_registrants(struct spdk_nvmf_ns *ns)
2731 : {
2732 : struct spdk_nvmf_registrant *reg, *reg_tmp;
2733 7 : uint32_t count = 0;
2734 :
2735 18 : TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, reg_tmp) {
2736 11 : nvmf_ns_reservation_remove_registrant(ns, reg);
2737 11 : count++;
2738 : }
2739 7 : return count;
2740 : }
2741 :
2742 : static void
2743 12 : nvmf_ns_reservation_acquire_reservation(struct spdk_nvmf_ns *ns, uint64_t rkey,
2744 : enum spdk_nvme_reservation_type rtype,
2745 : struct spdk_nvmf_registrant *holder)
2746 : {
2747 12 : ns->rtype = rtype;
2748 12 : ns->crkey = rkey;
2749 12 : assert(ns->holder == NULL);
2750 12 : ns->holder = holder;
2751 12 : }
2752 :
2753 : static bool
2754 45 : nvmf_ns_reservation_register(struct spdk_nvmf_ns *ns,
2755 : struct spdk_nvmf_ctrlr *ctrlr,
2756 : struct spdk_nvmf_request *req)
2757 : {
2758 45 : struct spdk_nvme_reservation_register_data key = { 0 };
2759 45 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
2760 : uint8_t rrega, iekey, cptpl, rtype;
2761 : struct spdk_nvmf_registrant *reg;
2762 45 : uint8_t status = SPDK_NVME_SC_SUCCESS;
2763 45 : bool update_sgroup = false;
2764 45 : struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2765 45 : uint32_t num_hostid = 0;
2766 : int rc;
2767 :
2768 45 : rrega = cmd->cdw10_bits.resv_register.rrega;
2769 45 : iekey = cmd->cdw10_bits.resv_register.iekey;
2770 45 : cptpl = cmd->cdw10_bits.resv_register.cptpl;
2771 :
2772 45 : if (req->iovcnt > 0 && req->length >= sizeof(key)) {
2773 45 : struct spdk_iov_xfer ix;
2774 45 : spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
2775 45 : spdk_iov_xfer_to_buf(&ix, &key, sizeof(key));
2776 : } else {
2777 0 : SPDK_ERRLOG("No key provided. Failing request.\n");
2778 0 : status = SPDK_NVME_SC_INVALID_FIELD;
2779 0 : goto exit;
2780 : }
2781 :
2782 45 : SPDK_DEBUGLOG(nvmf, "REGISTER: RREGA %u, IEKEY %u, CPTPL %u, "
2783 : 		      "CRKEY 0x%"PRIx64", NRKEY 0x%"PRIx64"\n",
2784 : rrega, iekey, cptpl, key.crkey, key.nrkey);
2785 :
2786 45 : if (cptpl == SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON) {
2787 : /* Ture to OFF state, and need to be updated in the configuration file */
2788 1 : if (ns->ptpl_activated) {
2789 1 : ns->ptpl_activated = 0;
2790 1 : update_sgroup = true;
2791 : }
2792 44 : } else if (cptpl == SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS) {
2793 4 : if (!nvmf_ns_is_ptpl_capable(ns)) {
2794 1 : status = SPDK_NVME_SC_INVALID_FIELD;
2795 1 : goto exit;
2796 3 : } else if (ns->ptpl_activated == 0) {
2797 3 : ns->ptpl_activated = 1;
2798 3 : update_sgroup = true;
2799 : }
2800 : }
2801 :
2802 : /* current Host Identifier has registrant or not */
2803 44 : reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
2804 :
2805 44 : switch (rrega) {
2806 36 : case SPDK_NVME_RESERVE_REGISTER_KEY:
2807 36 : if (!reg) {
2808 : /* register new controller */
2809 27 : if (key.nrkey == 0) {
2810 0 : SPDK_ERRLOG("Can't register zeroed new key\n");
2811 0 : status = SPDK_NVME_SC_INVALID_FIELD;
2812 0 : goto exit;
2813 : }
2814 27 : rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey);
2815 27 : if (rc < 0) {
2816 0 : status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2817 0 : goto exit;
2818 : }
2819 27 : update_sgroup = true;
2820 : } else {
2821 : /* register with same key is not an error */
2822 9 : if (reg->rkey != key.nrkey) {
2823 8 : SPDK_ERRLOG("The same host already register a "
2824 : "key with 0x%"PRIx64"\n",
2825 : reg->rkey);
2826 8 : status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2827 8 : goto exit;
2828 : }
2829 : }
2830 28 : break;
2831 4 : case SPDK_NVME_RESERVE_UNREGISTER_KEY:
2832 4 : if (!reg || (!iekey && reg->rkey != key.crkey)) {
2833 0 : 			SPDK_ERRLOG("No registrant, or the current key doesn't match "
2834 : 				    "the existing registrant key\n");
2835 0 : status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2836 0 : goto exit;
2837 : }
2838 :
2839 4 : rtype = ns->rtype;
2840 4 : num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
2841 : SPDK_NVMF_MAX_NUM_REGISTRANTS,
2842 : &ctrlr->hostid);
2843 :
2844 4 : nvmf_ns_reservation_remove_registrant(ns, reg);
2845 :
2846 4 : if (!ns->rtype && num_hostid && (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY ||
2847 : rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY)) {
2848 1 : nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
2849 : hostid_list,
2850 : num_hostid,
2851 : SPDK_NVME_RESERVATION_RELEASED);
2852 : }
2853 4 : update_sgroup = true;
2854 4 : break;
2855 4 : case SPDK_NVME_RESERVE_REPLACE_KEY:
2856 4 : if (key.nrkey == 0) {
2857 0 : SPDK_ERRLOG("Can't register zeroed new key\n");
2858 0 : status = SPDK_NVME_SC_INVALID_FIELD;
2859 0 : goto exit;
2860 : }
2861 : /* Registrant exists */
2862 4 : if (reg) {
2863 2 : if (!iekey && reg->rkey != key.crkey) {
2864 0 : SPDK_ERRLOG("Current key doesn't match "
2865 : "existing registrant key\n");
2866 0 : status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2867 0 : goto exit;
2868 : }
2869 2 : if (reg->rkey == key.nrkey) {
2870 0 : goto exit;
2871 : }
2872 2 : reg->rkey = key.nrkey;
2873 2 : } else if (iekey) { /* No registrant but IEKEY is set */
2874 : /* new registrant */
2875 1 : rc = nvmf_ns_reservation_add_registrant(ns, ctrlr, key.nrkey);
2876 1 : if (rc < 0) {
2877 0 : status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2878 0 : goto exit;
2879 : }
2880 : } else { /* No registrant */
2881 1 : SPDK_ERRLOG("No registrant\n");
2882 1 : status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2883 1 : goto exit;
2884 :
2885 : }
2886 3 : update_sgroup = true;
2887 3 : break;
2888 0 : default:
2889 0 : status = SPDK_NVME_SC_INVALID_FIELD;
2890 0 : goto exit;
2891 : }
2892 :
2893 45 : exit:
2894 45 : req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2895 45 : req->rsp->nvme_cpl.status.sc = status;
2896 45 : return update_sgroup;
2897 : }
2898 :
2899 : static bool
2900 13 : nvmf_ns_reservation_acquire(struct spdk_nvmf_ns *ns,
2901 : struct spdk_nvmf_ctrlr *ctrlr,
2902 : struct spdk_nvmf_request *req)
2903 : {
2904 13 : struct spdk_nvme_reservation_acquire_data key = { 0 };
2905 13 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
2906 : uint8_t racqa, iekey, rtype;
2907 : struct spdk_nvmf_registrant *reg;
2908 13 : bool all_regs = false;
2909 13 : uint32_t count = 0;
2910 13 : bool update_sgroup = true;
2911 13 : struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2912 13 : uint32_t num_hostid = 0;
2913 13 : struct spdk_uuid new_hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
2914 13 : uint32_t new_num_hostid = 0;
2915 13 : bool reservation_released = false;
2916 13 : uint8_t status = SPDK_NVME_SC_SUCCESS;
2917 :
2918 13 : racqa = cmd->cdw10_bits.resv_acquire.racqa;
2919 13 : iekey = cmd->cdw10_bits.resv_acquire.iekey;
2920 13 : rtype = cmd->cdw10_bits.resv_acquire.rtype;
2921 :
2922 13 : if (req->iovcnt > 0 && req->length >= sizeof(key)) {
2923 13 : struct spdk_iov_xfer ix;
2924 13 : spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
2925 13 : spdk_iov_xfer_to_buf(&ix, &key, sizeof(key));
2926 : } else {
2927 0 : SPDK_ERRLOG("No key provided. Failing request.\n");
2928 0 : status = SPDK_NVME_SC_INVALID_FIELD;
2929 0 : goto exit;
2930 : }
2931 :
2932 13 : SPDK_DEBUGLOG(nvmf, "ACQUIRE: RACQA %u, IEKEY %u, RTYPE %u, "
2933 : 		      "CRKEY 0x%"PRIx64", PRKEY 0x%"PRIx64"\n",
2934 : racqa, iekey, rtype, key.crkey, key.prkey);
2935 :
2936 13 : if (iekey || rtype > SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) {
2937 0 : SPDK_ERRLOG("Ignore existing key field set to 1\n");
2938 0 : status = SPDK_NVME_SC_INVALID_FIELD;
2939 0 : update_sgroup = false;
2940 0 : goto exit;
2941 : }
2942 :
2943 13 : reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
2944 : /* must be registrant and CRKEY must match */
2945 13 : if (!reg || reg->rkey != key.crkey) {
2946 0 : 		SPDK_ERRLOG("No registrant, or the current key doesn't match "
2947 : 			    "the existing registrant key\n");
2948 0 : status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2949 0 : update_sgroup = false;
2950 0 : goto exit;
2951 : }
2952 :
2953 13 : all_regs = nvmf_ns_reservation_all_registrants_type(ns);
2954 :
2955 13 : switch (racqa) {
2956 10 : case SPDK_NVME_RESERVE_ACQUIRE:
2957 : 		/* it's not an error for the holder to acquire the same reservation type again */
2958 10 : if (nvmf_ns_reservation_registrant_is_holder(ns, reg) && ns->rtype == rtype) {
2959 : /* do nothing */
2960 0 : update_sgroup = false;
2961 10 : } else if (ns->holder == NULL) {
2962 : /* first time to acquire the reservation */
2963 10 : nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
2964 : } else {
2965 0 : SPDK_ERRLOG("Invalid rtype or current registrant is not holder\n");
2966 0 : status = SPDK_NVME_SC_RESERVATION_CONFLICT;
2967 0 : update_sgroup = false;
2968 0 : goto exit;
2969 : }
2970 10 : break;
2971 3 : case SPDK_NVME_RESERVE_PREEMPT:
2972 : /* no reservation holder */
2973 3 : if (!ns->holder) {
2974 : /* unregister with PRKEY */
2975 0 : nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
2976 0 : break;
2977 : }
2978 3 : num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
2979 : SPDK_NVMF_MAX_NUM_REGISTRANTS,
2980 : &ctrlr->hostid);
2981 :
2982 : /* only 1 reservation holder and reservation key is valid */
2983 3 : if (!all_regs) {
2984 : /* preempt itself */
2985 2 : if (nvmf_ns_reservation_registrant_is_holder(ns, reg) &&
2986 0 : ns->crkey == key.prkey) {
2987 0 : ns->rtype = rtype;
2988 0 : reservation_released = true;
2989 0 : break;
2990 : }
2991 :
2992 2 : if (ns->crkey == key.prkey) {
2993 2 : nvmf_ns_reservation_remove_registrant(ns, ns->holder);
2994 2 : nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
2995 2 : reservation_released = true;
2996 0 : } else if (key.prkey != 0) {
2997 0 : nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
2998 : } else {
2999 : /* PRKEY is zero */
3000 0 : SPDK_ERRLOG("Current PRKEY is zero\n");
3001 0 : status = SPDK_NVME_SC_RESERVATION_CONFLICT;
3002 0 : update_sgroup = false;
3003 0 : goto exit;
3004 : }
3005 : } else {
3006 : /* release all other registrants except for the current one */
3007 1 : if (key.prkey == 0) {
3008 1 : nvmf_ns_reservation_remove_all_other_registrants(ns, reg);
3009 1 : assert(ns->holder == reg);
3010 : } else {
3011 0 : count = nvmf_ns_reservation_remove_registrants_by_key(ns, key.prkey);
3012 0 : if (count == 0) {
3013 0 : SPDK_ERRLOG("PRKEY doesn't match any registrant\n");
3014 0 : status = SPDK_NVME_SC_RESERVATION_CONFLICT;
3015 0 : update_sgroup = false;
3016 0 : goto exit;
3017 : }
3018 : }
3019 : }
3020 3 : break;
3021 0 : default:
3022 0 : status = SPDK_NVME_SC_INVALID_FIELD;
3023 0 : update_sgroup = false;
3024 0 : break;
3025 : }
3026 :
3027 13 : exit:
3028 13 : if (update_sgroup && racqa == SPDK_NVME_RESERVE_PREEMPT) {
3029 3 : new_num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, new_hostid_list,
3030 : SPDK_NVMF_MAX_NUM_REGISTRANTS,
3031 : &ctrlr->hostid);
3032 : /* Preempt notification occurs on the unregistered controllers
3033 : * other than the controller who issued the command.
3034 : */
3035 3 : num_hostid = nvmf_ns_reservation_get_unregistered_hostid(hostid_list,
3036 : num_hostid,
3037 : new_hostid_list,
3038 : new_num_hostid);
3039 3 : if (num_hostid) {
3040 3 : nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
3041 : hostid_list,
3042 : num_hostid,
3043 : SPDK_NVME_REGISTRATION_PREEMPTED);
3044 :
3045 : }
3046 : /* Reservation released notification occurs on the
3047 : * controllers which are the remaining registrants other than
3048 : * the controller who issued the command.
3049 : */
3050 3 : if (reservation_released && new_num_hostid) {
3051 2 : nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
3052 : new_hostid_list,
3053 : new_num_hostid,
3054 : SPDK_NVME_RESERVATION_RELEASED);
3055 :
3056 : }
3057 : }
3058 13 : req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3059 13 : req->rsp->nvme_cpl.status.sc = status;
3060 13 : return update_sgroup;
3061 : }
3062 :
3063 : static bool
3064 6 : nvmf_ns_reservation_release(struct spdk_nvmf_ns *ns,
3065 : struct spdk_nvmf_ctrlr *ctrlr,
3066 : struct spdk_nvmf_request *req)
3067 : {
3068 6 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
3069 : uint8_t rrela, iekey, rtype;
3070 : struct spdk_nvmf_registrant *reg;
3071 6 : uint64_t crkey = 0;
3072 6 : uint8_t status = SPDK_NVME_SC_SUCCESS;
3073 6 : bool update_sgroup = true;
3074 6 : struct spdk_uuid hostid_list[SPDK_NVMF_MAX_NUM_REGISTRANTS];
3075 6 : uint32_t num_hostid = 0;
3076 :
3077 6 : rrela = cmd->cdw10_bits.resv_release.rrela;
3078 6 : iekey = cmd->cdw10_bits.resv_release.iekey;
3079 6 : rtype = cmd->cdw10_bits.resv_release.rtype;
3080 :
3081 6 : if (req->iovcnt > 0 && req->length >= sizeof(crkey)) {
3082 6 : struct spdk_iov_xfer ix;
3083 6 : spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
3084 6 : spdk_iov_xfer_to_buf(&ix, &crkey, sizeof(crkey));
3085 : } else {
3086 0 : SPDK_ERRLOG("No key provided. Failing request.\n");
3087 0 : status = SPDK_NVME_SC_INVALID_FIELD;
3088 0 : goto exit;
3089 : }
3090 :
3091 6 : SPDK_DEBUGLOG(nvmf, "RELEASE: RRELA %u, IEKEY %u, RTYPE %u, "
3092 : "CRKEY 0x%"PRIx64"\n", rrela, iekey, rtype, crkey);
3093 :
3094 6 : if (iekey) {
3095 0 : SPDK_ERRLOG("Ignore existing key field set to 1\n");
3096 0 : status = SPDK_NVME_SC_INVALID_FIELD;
3097 0 : update_sgroup = false;
3098 0 : goto exit;
3099 : }
3100 :
3101 6 : reg = nvmf_ns_reservation_get_registrant(ns, &ctrlr->hostid);
3102 6 : if (!reg || reg->rkey != crkey) {
3103 0 : 		SPDK_ERRLOG("No registrant, or the current key doesn't match "
3104 : 			    "the existing registrant key\n");
3105 0 : status = SPDK_NVME_SC_RESERVATION_CONFLICT;
3106 0 : update_sgroup = false;
3107 0 : goto exit;
3108 : }
3109 :
3110 6 : num_hostid = nvmf_ns_reservation_get_all_other_hostid(ns, hostid_list,
3111 : SPDK_NVMF_MAX_NUM_REGISTRANTS,
3112 : &ctrlr->hostid);
3113 :
3114 6 : switch (rrela) {
3115 4 : case SPDK_NVME_RESERVE_RELEASE:
3116 4 : if (!ns->holder) {
3117 0 : SPDK_DEBUGLOG(nvmf, "RELEASE: no holder\n");
3118 0 : update_sgroup = false;
3119 0 : goto exit;
3120 : }
3121 4 : if (ns->rtype != rtype) {
3122 0 : SPDK_ERRLOG("Type doesn't match\n");
3123 0 : status = SPDK_NVME_SC_INVALID_FIELD;
3124 0 : update_sgroup = false;
3125 0 : goto exit;
3126 : }
3127 4 : if (!nvmf_ns_reservation_registrant_is_holder(ns, reg)) {
3128 : /* not the reservation holder, this isn't an error */
3129 0 : update_sgroup = false;
3130 0 : goto exit;
3131 : }
3132 :
3133 4 : rtype = ns->rtype;
3134 4 : nvmf_ns_reservation_release_reservation(ns);
3135 :
3136 4 : if (num_hostid && rtype != SPDK_NVME_RESERVE_WRITE_EXCLUSIVE &&
3137 : rtype != SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
3138 2 : nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
3139 : hostid_list,
3140 : num_hostid,
3141 : SPDK_NVME_RESERVATION_RELEASED);
3142 : }
3143 4 : break;
3144 2 : case SPDK_NVME_RESERVE_CLEAR:
3145 2 : nvmf_ns_reservation_clear_all_registrants(ns);
3146 2 : if (num_hostid) {
3147 2 : nvmf_subsystem_gen_ctrlr_notification(ns->subsystem, ns,
3148 : hostid_list,
3149 : num_hostid,
3150 : SPDK_NVME_RESERVATION_PREEMPTED);
3151 : }
3152 2 : break;
3153 0 : default:
3154 0 : status = SPDK_NVME_SC_INVALID_FIELD;
3155 0 : update_sgroup = false;
3156 0 : goto exit;
3157 : }
3158 :
3159 6 : exit:
3160 6 : req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3161 6 : req->rsp->nvme_cpl.status.sc = status;
3162 6 : return update_sgroup;
3163 : }
3164 :
3165 : static void
3166 3 : nvmf_ns_reservation_report(struct spdk_nvmf_ns *ns,
3167 : struct spdk_nvmf_ctrlr *ctrlr,
3168 : struct spdk_nvmf_request *req)
3169 : {
3170 3 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
3171 : struct spdk_nvmf_registrant *reg, *tmp;
3172 3 : struct spdk_nvme_reservation_status_extended_data status_data = { 0 };
3173 3 : struct spdk_iov_xfer ix;
3174 : uint32_t transfer_len;
3175 3 : uint32_t regctl = 0;
3176 3 : uint8_t status = SPDK_NVME_SC_SUCCESS;
3177 :
3178 3 : if (req->iovcnt == 0) {
3179 0 : SPDK_ERRLOG("No data transfer specified for request. "
3180 : " Unable to transfer back response.\n");
3181 0 : status = SPDK_NVME_SC_INVALID_FIELD;
3182 0 : goto exit;
3183 : }
3184 :
3185 3 : if (!cmd->cdw11_bits.resv_report.eds) {
3186 1 : SPDK_ERRLOG("NVMeoF uses extended controller data structure, "
3187 : "please set EDS bit in cdw11 and try again\n");
3188 1 : status = SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT;
3189 1 : goto exit;
3190 : }
3191 :
3192 : /* Number of Dwords of the Reservation Status data structure to transfer */
3193 2 : transfer_len = (cmd->cdw10 + 1) * sizeof(uint32_t);
3194 :
3195 2 : if (transfer_len < sizeof(struct spdk_nvme_reservation_status_extended_data)) {
3196 1 : status = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
3197 1 : goto exit;
3198 : }
3199 :
3200 1 : spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
3201 :
3202 1 : status_data.data.gen = ns->gen;
3203 1 : status_data.data.rtype = ns->rtype;
3204 1 : status_data.data.ptpls = ns->ptpl_activated;
3205 :
3206 3 : TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
3207 2 : regctl++;
3208 : }
3209 :
3210 : /*
3211 : * We report the number of registrants as per the spec here, even if
3212 : * the iov isn't big enough to contain them all. In that case, the
3213 : * spdk_iov_xfer_from_buf() won't actually copy any of the remaining
3214 : * data; as it keeps track of the iov cursor itself, it's simplest to
3215 : * just walk the entire list anyway.
3216 : */
3217 1 : status_data.data.regctl = regctl;
3218 :
3219 1 : spdk_iov_xfer_from_buf(&ix, &status_data, sizeof(status_data));
3220 :
3221 3 : TAILQ_FOREACH_SAFE(reg, &ns->registrants, link, tmp) {
3222 2 : struct spdk_nvme_registered_ctrlr_extended_data ctrlr_data = { 0 };
3223 :
3224 : /* Set to 0xffffh for dynamic controller */
3225 2 : ctrlr_data.cntlid = 0xffff;
3226 2 : ctrlr_data.rcsts.status = (ns->holder == reg) ? true : false;
3227 2 : ctrlr_data.rkey = reg->rkey;
3228 2 : spdk_uuid_copy((struct spdk_uuid *)ctrlr_data.hostid, ®->hostid);
3229 :
3230 2 : spdk_iov_xfer_from_buf(&ix, &ctrlr_data, sizeof(ctrlr_data));
3231 : }
3232 :
3233 3 : exit:
3234 3 : req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
3235 3 : req->rsp->nvme_cpl.status.sc = status;
3236 3 : return;
3237 : }
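/*
 * Example (illustrative sketch, not part of this file): the length check above
 * follows the NVMe convention that CDW10 carries the number of dwords to
 * transfer minus one. A host-side caller sizing a Reservation Report buffer for
 * "nr_regs" extended registrant entries could compute the CDW10 value as below;
 * the helper name is hypothetical.
 */
static uint32_t
example_report_cdw10(uint32_t nr_regs)
{
	size_t len = sizeof(struct spdk_nvme_reservation_status_extended_data) +
		     nr_regs * sizeof(struct spdk_nvme_registered_ctrlr_extended_data);

	/* Round up to dwords, then subtract one per the 0's based CDW10 encoding. */
	return (uint32_t)((len + sizeof(uint32_t) - 1) / sizeof(uint32_t)) - 1;
}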
3238 :
3239 : static void
3240 0 : nvmf_ns_reservation_complete(void *ctx)
3241 : {
3242 0 : struct spdk_nvmf_request *req = ctx;
3243 :
3244 0 : spdk_nvmf_request_complete(req);
3245 0 : }
3246 :
3247 : static void
3248 0 : _nvmf_ns_reservation_update_done(struct spdk_nvmf_subsystem *subsystem,
3249 : void *cb_arg, int status)
3250 : {
3251 0 : struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)cb_arg;
3252 0 : struct spdk_nvmf_poll_group *group = req->qpair->group;
3253 :
3254 0 : spdk_thread_send_msg(group->thread, nvmf_ns_reservation_complete, req);
3255 0 : }
3256 :
3257 : void
3258 0 : nvmf_ns_reservation_request(void *ctx)
3259 : {
3260 0 : struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)ctx;
3261 0 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
3262 0 : struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
3263 : uint32_t nsid;
3264 : struct spdk_nvmf_ns *ns;
3265 0 : bool update_sgroup = false;
3266 0 : int status = 0;
3267 :
3268 0 : nsid = cmd->nsid;
3269 0 : ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
3270 0 : assert(ns != NULL);
3271 :
3272 0 : switch (cmd->opc) {
3273 0 : case SPDK_NVME_OPC_RESERVATION_REGISTER:
3274 0 : update_sgroup = nvmf_ns_reservation_register(ns, ctrlr, req);
3275 0 : break;
3276 0 : case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
3277 0 : update_sgroup = nvmf_ns_reservation_acquire(ns, ctrlr, req);
3278 0 : break;
3279 0 : case SPDK_NVME_OPC_RESERVATION_RELEASE:
3280 0 : update_sgroup = nvmf_ns_reservation_release(ns, ctrlr, req);
3281 0 : break;
3282 0 : case SPDK_NVME_OPC_RESERVATION_REPORT:
3283 0 : nvmf_ns_reservation_report(ns, ctrlr, req);
3284 0 : break;
3285 0 : default:
3286 0 : break;
3287 : }
3288 :
3289 : /* update reservation information to subsystem's poll group */
3290 0 : if (update_sgroup) {
3291 0 : if (ns->ptpl_activated || cmd->opc == SPDK_NVME_OPC_RESERVATION_REGISTER) {
3292 0 : if (nvmf_ns_update_reservation_info(ns) != 0) {
3293 0 : req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
3294 : }
3295 : }
3296 0 : status = nvmf_subsystem_update_ns(ctrlr->subsys, _nvmf_ns_reservation_update_done, req);
3297 0 : if (status == 0) {
3298 0 : return;
3299 : }
3300 : }
3301 :
3302 0 : _nvmf_ns_reservation_update_done(ctrlr->subsys, req, status);
3303 : }
3304 :
3305 : static bool
3306 10 : nvmf_ns_is_ptpl_capable_json(const struct spdk_nvmf_ns *ns)
3307 : {
3308 10 : return ns->ptpl_file != NULL;
3309 : }
3310 :
3311 : static struct spdk_nvmf_ns_reservation_ops g_reservation_ops = {
3312 : .is_ptpl_capable = nvmf_ns_is_ptpl_capable_json,
3313 : .update = nvmf_ns_reservation_update_json,
3314 : .load = nvmf_ns_reservation_load_json,
3315 : };
3316 :
3317 : bool
3318 14 : nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns)
3319 : {
3320 14 : return g_reservation_ops.is_ptpl_capable(ns);
3321 : }
3322 :
3323 : static int
3324 7 : nvmf_ns_reservation_update(const struct spdk_nvmf_ns *ns,
3325 : const struct spdk_nvmf_reservation_info *info)
3326 : {
3327 7 : return g_reservation_ops.update(ns, info);
3328 : }
3329 :
3330 : static int
3331 6 : nvmf_ns_reservation_load(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info)
3332 : {
3333 6 : return g_reservation_ops.load(ns, info);
3334 : }
3335 :
3336 : void
3337 1 : spdk_nvmf_set_custom_ns_reservation_ops(const struct spdk_nvmf_ns_reservation_ops *ops)
3338 : {
3339 1 : g_reservation_ops = *ops;
3340 1 : }
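/*
 * Example (illustrative sketch, not part of this file): replacing the JSON-file
 * persistence backend with custom callbacks. The function and variable names
 * are hypothetical; each callback must match the signatures used by
 * g_reservation_ops above.
 */
static bool
example_ptpl_capable(const struct spdk_nvmf_ns *ns)
{
	return true;	/* e.g. this backend can always persist reservations */
}

static int
example_ptpl_update(const struct spdk_nvmf_ns *ns,
		    const struct spdk_nvmf_reservation_info *info)
{
	return 0;	/* store "info" in durable storage */
}

static int
example_ptpl_load(const struct spdk_nvmf_ns *ns, struct spdk_nvmf_reservation_info *info)
{
	return 0;	/* fill "info" from durable storage */
}

static const struct spdk_nvmf_ns_reservation_ops example_reservation_ops = {
	.is_ptpl_capable = example_ptpl_capable,
	.update = example_ptpl_update,
	.load = example_ptpl_load,
};

static void
example_use_custom_reservation_ops(void)
{
	/* Typically called once during application start-up. */
	spdk_nvmf_set_custom_ns_reservation_ops(&example_reservation_ops);
}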
3341 :
3342 : int
3343 0 : spdk_nvmf_subsystem_set_ana_reporting(struct spdk_nvmf_subsystem *subsystem,
3344 : bool ana_reporting)
3345 : {
3346 0 : if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
3347 0 : return -EAGAIN;
3348 : }
3349 :
3350 0 : subsystem->flags.ana_reporting = ana_reporting;
3351 :
3352 0 : return 0;
3353 : }
3354 :
3355 : bool
3356 0 : spdk_nvmf_subsystem_get_ana_reporting(struct spdk_nvmf_subsystem *subsystem)
3357 : {
3358 0 : return subsystem->flags.ana_reporting;
3359 : }
3360 :
3361 : struct subsystem_listener_update_ctx {
3362 : struct spdk_nvmf_subsystem_listener *listener;
3363 :
3364 : spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
3365 : void *cb_arg;
3366 : };
3367 :
3368 : static void
3369 0 : subsystem_listener_update_done(struct spdk_io_channel_iter *i, int status)
3370 : {
3371 0 : struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
3372 :
3373 0 : if (ctx->cb_fn) {
3374 0 : ctx->cb_fn(ctx->cb_arg, status);
3375 : }
3376 0 : free(ctx);
3377 0 : }
3378 :
3379 : static void
3380 0 : subsystem_listener_update_on_pg(struct spdk_io_channel_iter *i)
3381 : {
3382 0 : struct subsystem_listener_update_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
3383 : struct spdk_nvmf_subsystem_listener *listener;
3384 : struct spdk_nvmf_poll_group *group;
3385 : struct spdk_nvmf_ctrlr *ctrlr;
3386 :
3387 0 : listener = ctx->listener;
3388 0 : group = spdk_io_channel_get_ctx(spdk_io_channel_iter_get_channel(i));
3389 :
3390 0 : TAILQ_FOREACH(ctrlr, &listener->subsystem->ctrlrs, link) {
3391 0 : if (ctrlr->thread != spdk_get_thread()) {
3392 0 : continue;
3393 : }
3394 :
3395 0 : if (ctrlr->admin_qpair && ctrlr->admin_qpair->group == group && ctrlr->listener == listener) {
3396 0 : nvmf_ctrlr_async_event_ana_change_notice(ctrlr);
3397 : }
3398 : }
3399 :
3400 0 : spdk_for_each_channel_continue(i, 0);
3401 0 : }
3402 :
3403 : void
3404 0 : spdk_nvmf_subsystem_set_ana_state(struct spdk_nvmf_subsystem *subsystem,
3405 : const struct spdk_nvme_transport_id *trid,
3406 : enum spdk_nvme_ana_state ana_state, uint32_t anagrpid,
3407 : spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn, void *cb_arg)
3408 : {
3409 : struct spdk_nvmf_subsystem_listener *listener;
3410 : struct subsystem_listener_update_ctx *ctx;
3411 : uint32_t i;
3412 :
3413 0 : assert(cb_fn != NULL);
3414 0 : assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE ||
3415 : subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED);
3416 :
3417 0 : if (!subsystem->flags.ana_reporting) {
3418 0 : SPDK_ERRLOG("ANA reporting is disabled\n");
3419 0 : cb_fn(cb_arg, -EINVAL);
3420 0 : return;
3421 : }
3422 :
3423 : /* ANA Change state is not used, ANA Persistent Loss state
3424 : * is not supported yet.
3425 : */
3426 0 : if (!(ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE ||
3427 : ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE ||
3428 : ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE)) {
3429 0 : SPDK_ERRLOG("ANA state %d is not supported\n", ana_state);
3430 0 : cb_fn(cb_arg, -ENOTSUP);
3431 0 : return;
3432 : }
3433 :
3434 0 : if (anagrpid > subsystem->max_nsid) {
3435 0 : SPDK_ERRLOG("ANA group ID %" PRIu32 " is more than maximum\n", anagrpid);
3436 0 : 		SPDK_ERRLOG("ANA group ID %" PRIu32 " exceeds the maximum\n", anagrpid);
3437 0 : return;
3438 : }
3439 :
3440 0 : listener = nvmf_subsystem_find_listener(subsystem, trid);
3441 0 : if (!listener) {
3442 0 : SPDK_ERRLOG("Unable to find listener.\n");
3443 0 : cb_fn(cb_arg, -EINVAL);
3444 0 : return;
3445 : }
3446 :
3447 0 : if (anagrpid != 0 && listener->ana_state[anagrpid - 1] == ana_state) {
3448 0 : cb_fn(cb_arg, 0);
3449 0 : return;
3450 : }
3451 :
3452 0 : ctx = calloc(1, sizeof(*ctx));
3453 0 : if (!ctx) {
3454 0 : SPDK_ERRLOG("Unable to allocate context\n");
3455 0 : cb_fn(cb_arg, -ENOMEM);
3456 0 : return;
3457 : }
3458 :
3459 0 : for (i = 1; i <= subsystem->max_nsid; i++) {
3460 0 : if (anagrpid == 0 || i == anagrpid) {
3461 0 : listener->ana_state[i - 1] = ana_state;
3462 : }
3463 : }
3464 0 : listener->ana_state_change_count++;
3465 :
3466 0 : ctx->listener = listener;
3467 0 : ctx->cb_fn = cb_fn;
3468 0 : ctx->cb_arg = cb_arg;
3469 :
3470 0 : spdk_for_each_channel(subsystem->tgt,
3471 : subsystem_listener_update_on_pg,
3472 : ctx,
3473 : subsystem_listener_update_done);
3474 : }
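/*
 * Example (illustrative sketch, not part of this file): marking every ANA group
 * on one listener inaccessible, e.g. before taking a path down for maintenance.
 * The subsystem must be inactive or paused, per the assert above; anagrpid == 0
 * applies the state to all ANA groups of the listener. The helper and callback
 * names are hypothetical.
 */
static void
example_ana_done(void *cb_arg, int status)
{
	if (status != 0) {
		SPDK_ERRLOG("Failed to update ANA state: %d\n", status);
	}
}

static void
example_set_path_inaccessible(struct spdk_nvmf_subsystem *subsystem,
			      const struct spdk_nvme_transport_id *trid)
{
	spdk_nvmf_subsystem_set_ana_state(subsystem, trid,
					  SPDK_NVME_ANA_INACCESSIBLE_STATE, 0,
					  example_ana_done, NULL);
}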
3475 :
3476 : bool
3477 0 : spdk_nvmf_subsystem_is_discovery(struct spdk_nvmf_subsystem *subsystem)
3478 : {
3479 0 : return subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT ||
3480 0 : subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY;
3481 : }
3482 :
3483 : bool
3484 0 : nvmf_nqn_is_discovery(const char *nqn)
3485 : {
3486 0 : return strcmp(nqn, SPDK_NVMF_DISCOVERY_NQN) == 0;
3487 : }
|