/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/config.h"
#include "spdk/log.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_transport.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk_internal/usdt.h"

#define NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS 120000

struct nvmf_transport_ops_list_element {
	struct spdk_nvmf_transport_ops ops;
	TAILQ_ENTRY(nvmf_transport_ops_list_element) link;
};

TAILQ_HEAD(nvmf_transport_ops_list, nvmf_transport_ops_list_element)
g_spdk_nvmf_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_transport_ops);

static inline const struct spdk_nvmf_transport_ops *
nvmf_get_transport_ops(const char *transport_name)
{
	struct nvmf_transport_ops_list_element *ops;

	TAILQ_FOREACH(ops, &g_spdk_nvmf_transport_ops, link) {
		if (strcasecmp(transport_name, ops->ops.name) == 0) {
			return &ops->ops;
		}
	}
	return NULL;
}

void
spdk_nvmf_transport_register(const struct spdk_nvmf_transport_ops *ops)
{
	struct nvmf_transport_ops_list_element *new_ops;

	if (nvmf_get_transport_ops(ops->name) != NULL) {
		SPDK_ERRLOG("Double registering nvmf transport type %s.\n", ops->name);
		assert(false);
		return;
	}

	new_ops = calloc(1, sizeof(*new_ops));
	if (new_ops == NULL) {
		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
		assert(false);
		return;
	}

	new_ops->ops = *ops;

	TAILQ_INSERT_TAIL(&g_spdk_nvmf_transport_ops, new_ops, link);
}
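
/*
 * A transport module typically registers its ops table at load time via the
 * SPDK_NVMF_TRANSPORT_REGISTER() convenience macro from spdk/nvmf_transport.h,
 * which wraps spdk_nvmf_transport_register() in a constructor function.
 * A minimal sketch (the ops table below is illustrative, not a real transport):
 *
 *	static const struct spdk_nvmf_transport_ops my_ops = {
 *		.name = "MYTRANSPORT",
 *		.type = SPDK_NVME_TRANSPORT_CUSTOM,
 *		...
 *	};
 *	SPDK_NVMF_TRANSPORT_REGISTER(my_ops);
 */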

const struct spdk_nvmf_transport_opts *
spdk_nvmf_get_transport_opts(struct spdk_nvmf_transport *transport)
{
	return &transport->opts;
}

void
nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
			 bool named)
{
	const struct spdk_nvmf_transport_opts *opts = spdk_nvmf_get_transport_opts(transport);

	named ? spdk_json_write_named_object_begin(w, "params") : spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(transport));
	spdk_json_write_named_uint32(w, "max_queue_depth", opts->max_queue_depth);
	spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr", opts->max_qpairs_per_ctrlr - 1);
	spdk_json_write_named_uint32(w, "in_capsule_data_size", opts->in_capsule_data_size);
	spdk_json_write_named_uint32(w, "max_io_size", opts->max_io_size);
	spdk_json_write_named_uint32(w, "io_unit_size", opts->io_unit_size);
	spdk_json_write_named_uint32(w, "max_aq_depth", opts->max_aq_depth);
	spdk_json_write_named_uint32(w, "num_shared_buffers", opts->num_shared_buffers);
	spdk_json_write_named_uint32(w, "buf_cache_size", opts->buf_cache_size);
	spdk_json_write_named_bool(w, "dif_insert_or_strip", opts->dif_insert_or_strip);
	spdk_json_write_named_bool(w, "zcopy", opts->zcopy);

	if (transport->ops->dump_opts) {
		transport->ops->dump_opts(transport, w);
	}

	spdk_json_write_named_uint32(w, "abort_timeout_sec", opts->abort_timeout_sec);
	spdk_json_write_object_end(w);
}

void
nvmf_transport_listen_dump_trid(const struct spdk_nvme_transport_id *trid,
				struct spdk_json_write_ctx *w)
{
	const char *adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam);

	spdk_json_write_named_string(w, "trtype", trid->trstring);
	spdk_json_write_named_string(w, "adrfam", adrfam ? adrfam : "unknown");
	spdk_json_write_named_string(w, "traddr", trid->traddr);
	spdk_json_write_named_string(w, "trsvcid", trid->trsvcid);
}

spdk_nvme_transport_type_t
spdk_nvmf_get_transport_type(struct spdk_nvmf_transport *transport)
{
	return transport->ops->type;
}

const char *
spdk_nvmf_get_transport_name(struct spdk_nvmf_transport *transport)
{
	return transport->ops->name;
}

static void
nvmf_transport_opts_copy(struct spdk_nvmf_transport_opts *opts,
			 struct spdk_nvmf_transport_opts *opts_src,
			 size_t opts_size)
{
	assert(opts);
	assert(opts_src);

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvmf_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = opts_src->field; \
	} \

	SET_FIELD(max_queue_depth);
	SET_FIELD(max_qpairs_per_ctrlr);
	SET_FIELD(in_capsule_data_size);
	SET_FIELD(max_io_size);
	SET_FIELD(io_unit_size);
	SET_FIELD(max_aq_depth);
	SET_FIELD(buf_cache_size);
	SET_FIELD(num_shared_buffers);
	SET_FIELD(dif_insert_or_strip);
	SET_FIELD(abort_timeout_sec);
	SET_FIELD(association_timeout);
	SET_FIELD(transport_specific);
	SET_FIELD(acceptor_poll_rate);
	SET_FIELD(zcopy);

	/* Do not remove this statement: always update it when adding a new field,
	 * and do not forget to add a SET_FIELD statement for the new field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_transport_opts) == 64, "Incorrect size");

#undef SET_FIELD
}
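
/*
 * The opts_size guard in SET_FIELD above preserves ABI compatibility: a caller
 * built against an older, smaller spdk_nvmf_transport_opts passes a smaller
 * opts_size, and only the fields that fit entirely within that size are copied.
 * For example (illustrative), if opts_size ends just before the zcopy member,
 * SET_FIELD(zcopy) sees offsetof + sizeof exceed opts_size and leaves the
 * destination field untouched.
 */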

struct nvmf_transport_create_ctx {
	const struct spdk_nvmf_transport_ops *ops;
	struct spdk_nvmf_transport_opts opts;
	void *cb_arg;
	spdk_nvmf_transport_create_done_cb cb_fn;
};

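/* A transport opts out of the shared iobuf pools by leaving both
 * num_shared_buffers and buf_cache_size at zero; in that case no iobuf
 * module is registered and no per-poll-group buffer cache is created. */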
static bool
nvmf_transport_use_iobuf(struct spdk_nvmf_transport *transport)
{
	return transport->opts.num_shared_buffers || transport->opts.buf_cache_size;
}

static void
nvmf_transport_create_async_done(void *cb_arg, struct spdk_nvmf_transport *transport)
{
	struct nvmf_transport_create_ctx *ctx = cb_arg;
	int chars_written;

	if (!transport) {
		SPDK_ERRLOG("Failed to create transport.\n");
		goto err;
	}

	pthread_mutex_init(&transport->mutex, NULL);
	TAILQ_INIT(&transport->listeners);
	transport->ops = ctx->ops;
	transport->opts = ctx->opts;
	chars_written = snprintf(transport->iobuf_name, MAX_MEMPOOL_NAME_LENGTH, "%s_%s", "nvmf",
				 transport->ops->name);
	if (chars_written < 0) {
		SPDK_ERRLOG("Unable to generate transport data buffer pool name.\n");
		goto err;
	}

	if (nvmf_transport_use_iobuf(transport)) {
		spdk_iobuf_register_module(transport->iobuf_name);
	}

	ctx->cb_fn(ctx->cb_arg, transport);
	free(ctx);
	return;

err:
	if (transport) {
		transport->ops->destroy(transport, NULL, NULL);
	}

	ctx->cb_fn(ctx->cb_arg, NULL);
	free(ctx);
}

static void
_nvmf_transport_create_done(void *ctx)
{
	struct nvmf_transport_create_ctx *_ctx = (struct nvmf_transport_create_ctx *)ctx;

	nvmf_transport_create_async_done(_ctx, _ctx->ops->create(&_ctx->opts));
}

static int
nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transport_opts *opts,
		      spdk_nvmf_transport_create_done_cb cb_fn, void *cb_arg, bool sync)
{
	struct nvmf_transport_create_ctx *ctx;
	struct spdk_iobuf_opts opts_iobuf = {};
	int rc;
	uint64_t count;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		return -ENOMEM;
	}

	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		goto err;
	}

	if (!opts->opts_size) {
		SPDK_ERRLOG("The opts_size in the opts structure must not be zero\n");
		goto err;
	}

	ctx->ops = nvmf_get_transport_ops(transport_name);
	if (!ctx->ops) {
		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
		goto err;
	}

	nvmf_transport_opts_copy(&ctx->opts, opts, opts->opts_size);
	if (ctx->opts.max_io_size != 0 && (!spdk_u32_is_pow2(ctx->opts.max_io_size) ||
					   ctx->opts.max_io_size < 8192)) {
		SPDK_ERRLOG("max_io_size %u must be a power of 2 and be greater than or equal to 8KB\n",
			    ctx->opts.max_io_size);
		goto err;
	}

	if (ctx->opts.max_aq_depth < SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE) {
		SPDK_ERRLOG("max_aq_depth %u is less than the minimum defined by the NVMe-oF spec, using the minimum value\n",
			    ctx->opts.max_aq_depth);
		ctx->opts.max_aq_depth = SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE;
	}

	spdk_iobuf_get_opts(&opts_iobuf);
	if (ctx->opts.io_unit_size == 0) {
		SPDK_ERRLOG("io_unit_size cannot be 0\n");
		goto err;
	}
	if (ctx->opts.io_unit_size > opts_iobuf.large_bufsize) {
		SPDK_ERRLOG("io_unit_size %u is larger than iobuf pool large buffer size %u\n",
			    ctx->opts.io_unit_size, opts_iobuf.large_bufsize);
		goto err;
	}

	if (ctx->opts.io_unit_size <= opts_iobuf.small_bufsize) {
		/* We'll be using the small buffer pool only */
		count = opts_iobuf.small_pool_count;
	} else {
		count = spdk_min(opts_iobuf.small_pool_count, opts_iobuf.large_pool_count);
	}

	if (ctx->opts.num_shared_buffers > count) {
		SPDK_WARNLOG("The num_shared_buffers value (%u) is larger than the available iobuf"
			     " pool size (%" PRIu64 "). Please increase the iobuf pool sizes.\n",
			     ctx->opts.num_shared_buffers, count);
	}

	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Prioritize sync create operation. */
	if (ctx->ops->create) {
		if (sync) {
			_nvmf_transport_create_done(ctx);
			return 0;
		}

		rc = spdk_thread_send_msg(spdk_get_thread(), _nvmf_transport_create_done, ctx);
		if (rc) {
			goto err;
		}

		return 0;
	}

	assert(ctx->ops->create_async);
	rc = ctx->ops->create_async(&ctx->opts, nvmf_transport_create_async_done, ctx);
	if (rc) {
		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
		goto err;
	}

	return 0;
err:
	free(ctx);
	return -1;
}

int
spdk_nvmf_transport_create_async(const char *transport_name, struct spdk_nvmf_transport_opts *opts,
				 spdk_nvmf_transport_create_done_cb cb_fn, void *cb_arg)
{
	return nvmf_transport_create(transport_name, opts, cb_fn, cb_arg, false);
}

static void
nvmf_transport_create_sync_done(void *cb_arg, struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport **_transport = cb_arg;

	*_transport = transport;
}

struct spdk_nvmf_transport *
spdk_nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transport_opts *opts)
{
	struct spdk_nvmf_transport *transport = NULL;

	/* The synchronous API can only be used with transports that implement the synchronous create operation. */
	assert(nvmf_get_transport_ops(transport_name) && nvmf_get_transport_ops(transport_name)->create);

	nvmf_transport_create(transport_name, opts, nvmf_transport_create_sync_done, &transport, true);
	return transport;
}
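
/*
 * Typical synchronous usage (a sketch, error handling omitted): initialize the
 * defaults for a registered transport, optionally tweak them, then create the
 * transport.
 *
 *	struct spdk_nvmf_transport_opts opts = {};
 *	struct spdk_nvmf_transport *transport;
 *
 *	if (spdk_nvmf_transport_opts_init("TCP", &opts, sizeof(opts))) {
 *		opts.max_queue_depth = 128;
 *		transport = spdk_nvmf_transport_create("TCP", &opts);
 *	}
 */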

struct spdk_nvmf_transport *
spdk_nvmf_transport_get_first(struct spdk_nvmf_tgt *tgt)
{
	return TAILQ_FIRST(&tgt->transports);
}

struct spdk_nvmf_transport *
spdk_nvmf_transport_get_next(struct spdk_nvmf_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}

int
spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport,
			    spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
{
	struct spdk_nvmf_listener *listener, *listener_tmp;

	TAILQ_FOREACH_SAFE(listener, &transport->listeners, link, listener_tmp) {
		TAILQ_REMOVE(&transport->listeners, listener, link);
		transport->ops->stop_listen(transport, &listener->trid);
		free(listener);
	}

	if (nvmf_transport_use_iobuf(transport)) {
		spdk_iobuf_unregister_module(transport->iobuf_name);
	}

	pthread_mutex_destroy(&transport->mutex);
	return transport->ops->destroy(transport, cb_fn, cb_arg);
}

struct spdk_nvmf_listener *
nvmf_transport_find_listener(struct spdk_nvmf_transport *transport,
			     const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_listener *listener;

	TAILQ_FOREACH(listener, &transport->listeners, link) {
		if (spdk_nvme_transport_id_compare(&listener->trid, trid) == 0) {
			return listener;
		}
	}

	return NULL;
}

int
spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
			   const struct spdk_nvme_transport_id *trid, struct spdk_nvmf_listen_opts *opts)
{
	struct spdk_nvmf_listener *listener;
	int rc;

	listener = nvmf_transport_find_listener(transport, trid);
	if (!listener) {
		listener = calloc(1, sizeof(*listener));
		if (!listener) {
			return -ENOMEM;
		}

		listener->ref = 1;
		listener->trid = *trid;
		TAILQ_INSERT_TAIL(&transport->listeners, listener, link);
		pthread_mutex_lock(&transport->mutex);
		rc = transport->ops->listen(transport, &listener->trid, opts);
		pthread_mutex_unlock(&transport->mutex);
		if (rc != 0) {
			TAILQ_REMOVE(&transport->listeners, listener, link);
			free(listener);
		}
		return rc;
	}

	++listener->ref;

	return 0;
}

int
spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,
				const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_listener *listener;

	listener = nvmf_transport_find_listener(transport, trid);
	if (!listener) {
		return -ENOENT;
	}

	if (--listener->ref == 0) {
		TAILQ_REMOVE(&transport->listeners, listener, link);
		pthread_mutex_lock(&transport->mutex);
		transport->ops->stop_listen(transport, trid);
		pthread_mutex_unlock(&transport->mutex);
		free(listener);
	}

	return 0;
}
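
/*
 * Listeners are reference counted: each spdk_nvmf_transport_listen() on the
 * same trid bumps the count and each spdk_nvmf_transport_stop_listen() drops
 * it; the listener is only torn down when the count reaches zero. A sketch
 * (the trid string is illustrative):
 *
 *	struct spdk_nvme_transport_id trid = {};
 *
 *	spdk_nvme_transport_id_parse(&trid,
 *		"trtype:TCP adrfam:IPv4 traddr:127.0.0.1 trsvcid:4420");
 *	spdk_nvmf_transport_listen(transport, &trid, &listen_opts);
 *	...
 *	spdk_nvmf_transport_stop_listen(transport, &trid);
 */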

struct nvmf_stop_listen_ctx {
	struct spdk_nvmf_transport *transport;
	struct spdk_nvme_transport_id trid;
	struct spdk_nvmf_subsystem *subsystem;
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
};

static void
nvmf_stop_listen_fini(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_stop_listen_ctx *ctx;
	struct spdk_nvmf_transport *transport;
	int rc = status;

	ctx = spdk_io_channel_iter_get_ctx(i);
	transport = ctx->transport;
	assert(transport != NULL);

	rc = spdk_nvmf_transport_stop_listen(transport, &ctx->trid);
	if (rc) {
		SPDK_ERRLOG("Failed to stop listening on address '%s'\n", ctx->trid.traddr);
	}

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_arg, rc);
	}
	free(ctx);
}

static void nvmf_stop_listen_disconnect_qpairs(struct spdk_io_channel_iter *i);

static void
nvmf_stop_listen_disconnect_qpairs_msg(void *ctx)
{
	nvmf_stop_listen_disconnect_qpairs((struct spdk_io_channel_iter *)ctx);
}

static void
nvmf_stop_listen_disconnect_qpairs(struct spdk_io_channel_iter *i)
{
	struct nvmf_stop_listen_ctx *ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_qpair *qpair, *tmp_qpair;
	struct spdk_nvme_transport_id tmp_trid;
	bool qpair_found = false;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
		if (spdk_nvmf_qpair_get_listen_trid(qpair, &tmp_trid)) {
			continue;
		}

		/* Disconnect only qpairs whose listen trid matches; if ctx->subsystem
		 * is non-NULL, the qpair's controller must also belong to that subsystem.
		 * A NULL ctx->subsystem disconnects every qpair matching the trid. */
		if (!spdk_nvme_transport_id_compare(&ctx->trid, &tmp_trid)) {
			if (ctx->subsystem == NULL ||
			    (qpair->ctrlr != NULL && ctx->subsystem == qpair->ctrlr->subsys)) {
				spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
				qpair_found = true;
			}
		}
	}
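
	/* Qpair disconnect completes asynchronously; if any matching qpairs were
	 * found, re-send this function to the same thread and rescan until none
	 * remain before moving on to the next channel. */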
	if (qpair_found) {
		spdk_thread_send_msg(spdk_get_thread(), nvmf_stop_listen_disconnect_qpairs_msg, i);
		return;
	}

	spdk_for_each_channel_continue(i, 0);
}

int
spdk_nvmf_transport_stop_listen_async(struct spdk_nvmf_transport *transport,
				      const struct spdk_nvme_transport_id *trid,
				      struct spdk_nvmf_subsystem *subsystem,
				      spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				      void *cb_arg)
{
	struct nvmf_stop_listen_ctx *ctx;

	if (trid->subnqn[0] != '\0') {
		SPDK_ERRLOG("subnqn should be empty, use subsystem pointer instead\n");
		return -EINVAL;
	}

	ctx = calloc(1, sizeof(struct nvmf_stop_listen_ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->trid = *trid;
	ctx->subsystem = subsystem;
	ctx->transport = transport;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(transport->tgt, nvmf_stop_listen_disconnect_qpairs, ctx,
			      nvmf_stop_listen_fini);

	return 0;
}

void
nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
				 struct spdk_nvme_transport_id *trid,
				 struct spdk_nvmf_discovery_log_page_entry *entry)
{
	transport->ops->listener_discover(transport, trid, entry);
}

struct spdk_nvmf_transport_poll_group *
nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport,
				 struct spdk_nvmf_poll_group *group)
{
	struct spdk_nvmf_transport_poll_group *tgroup;
	struct spdk_iobuf_opts opts_iobuf = {};
	uint32_t buf_cache_size, small_cache_size, large_cache_size;
	int rc;

	pthread_mutex_lock(&transport->mutex);
	tgroup = transport->ops->poll_group_create(transport, group);
	pthread_mutex_unlock(&transport->mutex);
	if (!tgroup) {
		return NULL;
	}
	tgroup->transport = transport;

	STAILQ_INIT(&tgroup->pending_buf_queue);

	if (!nvmf_transport_use_iobuf(transport)) {
		/* We aren't going to allocate any shared buffers or cache, so just return now. */
		return tgroup;
	}

	buf_cache_size = transport->opts.buf_cache_size;

	/* buf_cache_size of UINT32_MAX means the value should be calculated dynamically
	 * based on the number of buffers in the shared pool and the number of poll groups
	 * that are sharing them. We allocate 75% of the pool for the cache, and then
	 * divide that by number of poll groups to determine the buf_cache_size for this
	 * poll group.
	 */
	if (buf_cache_size == UINT32_MAX) {
		uint32_t num_shared_buffers = transport->opts.num_shared_buffers;

		/* Theoretically the nvmf library can dynamically add poll groups to
		 * the target, after transports have already been created. We aren't
		 * going to try to really handle this case efficiently, just do enough
		 * here to ensure we don't divide-by-zero.
		 */
		uint16_t num_poll_groups = group->tgt->num_poll_groups ? : spdk_env_get_core_count();

		buf_cache_size = (num_shared_buffers * 3 / 4) / num_poll_groups;
	}
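
	/* For example (illustrative numbers): with 4095 shared buffers and 8 poll
	 * groups, each group would cache (4095 * 3 / 4) / 8 = 383 buffers. */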

	spdk_iobuf_get_opts(&opts_iobuf);
	small_cache_size = buf_cache_size;
	if (transport->opts.io_unit_size <= opts_iobuf.small_bufsize) {
		large_cache_size = 0;
	} else {
		large_cache_size = buf_cache_size;
	}

	tgroup->buf_cache = calloc(1, sizeof(*tgroup->buf_cache));
	if (!tgroup->buf_cache) {
		SPDK_ERRLOG("Unable to allocate an iobuf channel in the poll group.\n");
		goto err;
	}

	rc = spdk_iobuf_channel_init(tgroup->buf_cache, transport->iobuf_name, small_cache_size,
				     large_cache_size);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to reserve the full number of buffers for the pg buffer cache.\n");
		rc = spdk_iobuf_channel_init(tgroup->buf_cache, transport->iobuf_name, 0, 0);
		if (rc != 0) {
			SPDK_ERRLOG("Unable to create an iobuf channel in the poll group.\n");
			goto err;
		}
	}

	return tgroup;
err:
	transport->ops->poll_group_destroy(tgroup);
	return NULL;
}

struct spdk_nvmf_transport_poll_group *
nvmf_transport_get_optimal_poll_group(struct spdk_nvmf_transport *transport,
				      struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	if (transport->ops->get_optimal_poll_group) {
		pthread_mutex_lock(&transport->mutex);
		tgroup = transport->ops->get_optimal_poll_group(qpair);
		pthread_mutex_unlock(&transport->mutex);

		return tgroup;
	} else {
		return NULL;
	}
}

void
nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_iobuf_channel *ch = NULL;

	transport = group->transport;

	if (!STAILQ_EMPTY(&group->pending_buf_queue)) {
		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
	}

	if (nvmf_transport_use_iobuf(transport)) {
		/* The call to poll_group_destroy not only frees the group memory but also
		 * releases any remaining buffers. Cache the channel pointer here so we can
		 * still release its resources after the group has been freed. */
		ch = group->buf_cache;
	}

	pthread_mutex_lock(&transport->mutex);
	transport->ops->poll_group_destroy(group);
	pthread_mutex_unlock(&transport->mutex);

	if (nvmf_transport_use_iobuf(transport)) {
		spdk_iobuf_channel_fini(ch);
		free(ch);
	}
}

int
nvmf_transport_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_qpair *qpair)
{
	if (qpair->transport) {
		assert(qpair->transport == group->transport);
		if (qpair->transport != group->transport) {
			return -1;
		}
	} else {
		qpair->transport = group->transport;
	}

	SPDK_DTRACE_PROBE3(nvmf_transport_poll_group_add, qpair, qpair->qid,
			   spdk_thread_get_id(group->group->thread));

	return group->transport->ops->poll_group_add(group, qpair);
}

int
nvmf_transport_poll_group_remove(struct spdk_nvmf_transport_poll_group *group,
				 struct spdk_nvmf_qpair *qpair)
{
	int rc = -ENOTSUP;

	SPDK_DTRACE_PROBE3(nvmf_transport_poll_group_remove, qpair, qpair->qid,
			   spdk_thread_get_id(group->group->thread));

	assert(qpair->transport == group->transport);
	if (group->transport->ops->poll_group_remove) {
		rc = group->transport->ops->poll_group_remove(group, qpair);
	}

	return rc;
}

int
nvmf_transport_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
{
	return group->transport->ops->poll_group_poll(group);
}

int
nvmf_transport_req_free(struct spdk_nvmf_request *req)
{
	return req->qpair->transport->ops->req_free(req);
}

int
nvmf_transport_req_complete(struct spdk_nvmf_request *req)
{
	return req->qpair->transport->ops->req_complete(req);
}

void
nvmf_transport_qpair_fini(struct spdk_nvmf_qpair *qpair,
			  spdk_nvmf_transport_qpair_fini_cb cb_fn,
			  void *cb_arg)
{
	SPDK_DTRACE_PROBE1(nvmf_transport_qpair_fini, qpair);

	qpair->transport->ops->qpair_fini(qpair, cb_fn, cb_arg);
}

int
nvmf_transport_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
				   struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_peer_trid(qpair, trid);
}

int
nvmf_transport_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
				    struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_local_trid(qpair, trid);
}

int
nvmf_transport_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
				     struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_listen_trid(qpair, trid);
}

void
nvmf_transport_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
				   struct spdk_nvmf_request *req)
{
	if (qpair->transport->ops->qpair_abort_request) {
		qpair->transport->ops->qpair_abort_request(qpair, req);
	}
}

bool
spdk_nvmf_transport_opts_init(const char *transport_name,
			      struct spdk_nvmf_transport_opts *opts, size_t opts_size)
{
	const struct spdk_nvmf_transport_ops *ops;
	struct spdk_nvmf_transport_opts opts_local = {};

	ops = nvmf_get_transport_ops(transport_name);
	if (!ops) {
		SPDK_ERRLOG("Transport type %s unavailable.\n", transport_name);
		return false;
	}

	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return false;
	}

	if (!opts_size) {
798 1 : SPDK_ERRLOG("opts_size inside opts should not be zero value\n");
		return false;
	}

	opts_local.association_timeout = NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS;
	opts_local.acceptor_poll_rate = SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US;
	ops->opts_init(&opts_local);

	nvmf_transport_opts_copy(opts, &opts_local, opts_size);

	return true;
}

void
spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
			       struct spdk_nvmf_transport_poll_group *group,
			       struct spdk_nvmf_transport *transport)
{
	uint32_t i;

	for (i = 0; i < req->iovcnt; i++) {
		spdk_iobuf_put(group->buf_cache, req->iov[i].iov_base, req->iov[i].iov_len);
		req->iov[i].iov_base = NULL;
		req->iov[i].iov_len = 0;
	}
	req->iovcnt = 0;
	req->data_from_pool = false;
}

typedef int (*set_buffer_callback)(struct spdk_nvmf_request *req, void *buf,
				   uint32_t length, uint32_t io_unit_size);

static int
nvmf_request_set_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
			uint32_t io_unit_size)
{
	req->iov[req->iovcnt].iov_base = buf;
	req->iov[req->iovcnt].iov_len = spdk_min(length, io_unit_size);
	length -= req->iov[req->iovcnt].iov_len;
	req->iovcnt++;

	return length;
}

static int
nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			 struct spdk_nvmf_transport_poll_group *group,
			 struct spdk_nvmf_transport *transport,
			 uint32_t length, uint32_t io_unit_size,
			 set_buffer_callback cb_func)
{
	uint32_t num_buffers;
	uint32_t i = 0;
	void *buffer;

	/* If the number of buffers is too large, then we know the I/O is larger than allowed.
	 * Fail it.
	 */
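	/* For example (illustrative numbers): length = 131072 (128 KiB) with
	 * io_unit_size = 32768 gives SPDK_CEIL_DIV(131072, 32768) = 4 buffers. */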
	num_buffers = SPDK_CEIL_DIV(length, io_unit_size);
	if (num_buffers > NVMF_REQ_MAX_BUFFERS) {
		return -EINVAL;
	}

	while (i < num_buffers) {
		buffer = spdk_iobuf_get(group->buf_cache, spdk_min(io_unit_size, length), NULL, NULL);
		if (buffer == NULL) {
			return -ENOMEM;
		}
		length = cb_func(req, buffer, length, io_unit_size);
		i++;
	}

	assert(length == 0);

	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	int rc;

	assert(nvmf_transport_use_iobuf(transport));

	req->iovcnt = 0;
	rc = nvmf_request_get_buffers(req, group, transport, length,
				      transport->opts.io_unit_size,
				      nvmf_request_set_buffer);
	if (!rc) {
		req->data_from_pool = true;
	} else if (rc == -ENOMEM) {
		spdk_nvmf_request_free_buffers(req, group, transport);
		return rc;
	}

	return rc;
}

static int
nvmf_request_set_stripped_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
				 uint32_t io_unit_size)
{
	struct spdk_nvmf_stripped_data *data = req->stripped_data;

	data->iov[data->iovcnt].iov_base = buf;
	data->iov[data->iovcnt].iov_len = spdk_min(length, io_unit_size);
	length -= data->iov[data->iovcnt].iov_len;
	data->iovcnt++;

	return length;
}

void
nvmf_request_free_stripped_buffers(struct spdk_nvmf_request *req,
				   struct spdk_nvmf_transport_poll_group *group,
				   struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_stripped_data *data = req->stripped_data;
	uint32_t i;

	for (i = 0; i < data->iovcnt; i++) {
		spdk_iobuf_put(group->buf_cache, data->iov[i].iov_base, data->iov[i].iov_len);
	}
	free(data);
	req->stripped_data = NULL;
}

int
nvmf_request_get_stripped_buffers(struct spdk_nvmf_request *req,
				  struct spdk_nvmf_transport_poll_group *group,
				  struct spdk_nvmf_transport *transport,
				  uint32_t length)
{
	uint32_t block_size = req->dif.dif_ctx.block_size;
	uint32_t data_block_size = block_size - req->dif.dif_ctx.md_size;
	uint32_t io_unit_size = transport->opts.io_unit_size / block_size * data_block_size;
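	/* For example (illustrative numbers): with 520-byte extended LBAs carrying
	 * 8 bytes of metadata (data_block_size = 512) and io_unit_size = 8192,
	 * each buffer holds 8192 / 520 * 512 = 7680 bytes of stripped data. */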
	struct spdk_nvmf_stripped_data *data;
	uint32_t i;
	int rc;

	/* Data blocks must be block aligned */
	for (i = 0; i < req->iovcnt; i++) {
		if (req->iov[i].iov_len % block_size) {
			return -EINVAL;
		}
	}

	data = calloc(1, sizeof(*data));
	if (data == NULL) {
		SPDK_ERRLOG("Unable to allocate memory for stripped_data.\n");
		return -ENOMEM;
	}
	req->stripped_data = data;
	req->stripped_data->iovcnt = 0;

	rc = nvmf_request_get_buffers(req, group, transport, length, io_unit_size,
				      nvmf_request_set_stripped_buffer);
	if (rc == -ENOMEM) {
		nvmf_request_free_stripped_buffers(req, group, transport);
		return rc;
	}
	return rc;
}