Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2023 Intel Corporation.
3 : * All rights reserved.
4 : */
5 :
6 : #include "spdk/env.h"
7 : #include "spdk/util.h"
8 : #include "spdk/likely.h"
9 : #include "spdk/log.h"
10 : #include "spdk/thread.h"
11 :
12 : #define IOBUF_MIN_SMALL_POOL_SIZE 64
13 : #define IOBUF_MIN_LARGE_POOL_SIZE 8
14 : #define IOBUF_DEFAULT_SMALL_POOL_SIZE 8192
15 : #define IOBUF_DEFAULT_LARGE_POOL_SIZE 1024
16 : #define IOBUF_ALIGNMENT 4096
17 : #define IOBUF_MIN_SMALL_BUFSIZE 4096
18 : #define IOBUF_MIN_LARGE_BUFSIZE 8192
19 : #define IOBUF_DEFAULT_SMALL_BUFSIZE (8 * 1024)
20 : /* 132k is a weird choice at first, but this needs to be large enough to accomodate
21 : * the default maximum size (128k) plus metadata everywhere. For code paths that
22 : * are explicitly configured, the math is instead done properly. This is only
23 : * for the default. */
24 : #define IOBUF_DEFAULT_LARGE_BUFSIZE (132 * 1024)
25 : #define IOBUF_MAX_CHANNELS 64
26 :
27 : SPDK_STATIC_ASSERT(sizeof(struct spdk_iobuf_buffer) <= IOBUF_MIN_SMALL_BUFSIZE,
28 : "Invalid data offset");
29 :
/* Per-thread context of the iobuf io_device. Holds the wait queues for
 * requests that could not get a buffer, plus back-pointers to every
 * spdk_iobuf_channel created on this thread (used for stats collection). */
struct iobuf_channel {
	spdk_iobuf_entry_stailq_t small_queue;	/* waiters for small buffers */
	spdk_iobuf_entry_stailq_t large_queue;	/* waiters for large buffers */
	struct spdk_iobuf_channel *channels[IOBUF_MAX_CHANNELS];
};
35 :
/* A consumer of the iobuf pools, registered by name via
 * spdk_iobuf_register_module(). */
struct iobuf_module {
	char *name;	/* heap-allocated copy of the module name; owned here */
	TAILQ_ENTRY(iobuf_module) tailq;
};
40 :
/* Global iobuf state. */
struct iobuf {
	struct spdk_ring *small_pool;	/* MP/MC ring of free small buffers */
	struct spdk_ring *large_pool;	/* MP/MC ring of free large buffers */
	void *small_pool_base;		/* backing DMA allocation for small_pool */
	void *large_pool_base;		/* backing DMA allocation for large_pool */
	struct spdk_iobuf_opts opts;	/* effective pool/buffer sizing options */
	TAILQ_HEAD(, iobuf_module) modules;	/* registered consumer modules */
	spdk_iobuf_finish_cb finish_cb;	/* invoked once teardown completes */
	void *finish_arg;		/* argument passed to finish_cb */
};
51 :
/* Singleton iobuf state, preloaded with the default options. Its address
 * also serves as the io_device handle registered with the thread library. */
static struct iobuf g_iobuf = {
	.modules = TAILQ_HEAD_INITIALIZER(g_iobuf.modules),
	.small_pool = NULL,
	.large_pool = NULL,
	.small_pool_base = NULL,
	.large_pool_base = NULL,
	.opts = {
		.small_pool_count = IOBUF_DEFAULT_SMALL_POOL_SIZE,
		.large_pool_count = IOBUF_DEFAULT_LARGE_POOL_SIZE,
		.small_bufsize = IOBUF_DEFAULT_SMALL_BUFSIZE,
		.large_bufsize = IOBUF_DEFAULT_LARGE_BUFSIZE,
	},
};
65 :
/* Context carried through the spdk_for_each_channel() iteration performed
 * by spdk_iobuf_get_stats(). */
struct iobuf_get_stats_ctx {
	struct spdk_iobuf_module_stats *modules;	/* per-module totals, one entry per registered module */
	uint32_t num_modules;
	spdk_iobuf_get_stats_cb cb_fn;	/* user callback, invoked when iteration finishes */
	void *cb_arg;
};
72 :
73 : static int
74 75 : iobuf_channel_create_cb(void *io_device, void *ctx)
75 : {
76 75 : struct iobuf_channel *ch = ctx;
77 :
78 75 : STAILQ_INIT(&ch->small_queue);
79 75 : STAILQ_INIT(&ch->large_queue);
80 :
81 75 : return 0;
82 : }
83 :
84 : static void
85 75 : iobuf_channel_destroy_cb(void *io_device, void *ctx)
86 : {
87 75 : struct iobuf_channel *ch __attribute__((unused)) = ctx;
88 :
89 75 : assert(STAILQ_EMPTY(&ch->small_queue));
90 75 : assert(STAILQ_EMPTY(&ch->large_queue));
91 75 : }
92 :
93 : int
94 65 : spdk_iobuf_initialize(void)
95 : {
96 65 : struct spdk_iobuf_opts *opts = &g_iobuf.opts;
97 65 : int rc = 0;
98 : uint64_t i;
99 65 : struct spdk_iobuf_buffer *buf;
100 :
101 65 : g_iobuf.small_pool = spdk_ring_create(SPDK_RING_TYPE_MP_MC, opts->small_pool_count,
102 : SPDK_ENV_SOCKET_ID_ANY);
103 65 : if (!g_iobuf.small_pool) {
104 0 : SPDK_ERRLOG("Failed to create small iobuf pool\n");
105 0 : rc = -ENOMEM;
106 0 : goto error;
107 : }
108 :
109 : /* Round up to the nearest alignment so that each element remains aligned */
110 65 : opts->small_bufsize = SPDK_ALIGN_CEIL(opts->small_bufsize, IOBUF_ALIGNMENT);
111 65 : g_iobuf.small_pool_base = spdk_malloc(opts->small_bufsize * opts->small_pool_count, IOBUF_ALIGNMENT,
112 : NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
113 65 : if (g_iobuf.small_pool_base == NULL) {
114 0 : SPDK_ERRLOG("Unable to allocate requested small iobuf pool size\n");
115 0 : rc = -ENOMEM;
116 0 : goto error;
117 : }
118 :
119 65 : g_iobuf.large_pool = spdk_ring_create(SPDK_RING_TYPE_MP_MC, opts->large_pool_count,
120 : SPDK_ENV_SOCKET_ID_ANY);
121 65 : if (!g_iobuf.large_pool) {
122 0 : SPDK_ERRLOG("Failed to create large iobuf pool\n");
123 0 : rc = -ENOMEM;
124 0 : goto error;
125 : }
126 :
127 : /* Round up to the nearest alignment so that each element remains aligned */
128 65 : opts->large_bufsize = SPDK_ALIGN_CEIL(opts->large_bufsize, IOBUF_ALIGNMENT);
129 65 : g_iobuf.large_pool_base = spdk_malloc(opts->large_bufsize * opts->large_pool_count, IOBUF_ALIGNMENT,
130 : NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
131 65 : if (g_iobuf.large_pool_base == NULL) {
132 0 : SPDK_ERRLOG("Unable to allocate requested large iobuf pool size\n");
133 0 : rc = -ENOMEM;
134 0 : goto error;
135 : }
136 :
137 516167 : for (i = 0; i < opts->small_pool_count; i++) {
138 516102 : buf = g_iobuf.small_pool_base + i * opts->small_bufsize;
139 516102 : spdk_ring_enqueue(g_iobuf.small_pool, (void **)&buf, 1, NULL);
140 : }
141 :
142 64583 : for (i = 0; i < opts->large_pool_count; i++) {
143 64518 : buf = g_iobuf.large_pool_base + i * opts->large_bufsize;
144 64518 : spdk_ring_enqueue(g_iobuf.large_pool, (void **)&buf, 1, NULL);
145 : }
146 :
147 65 : spdk_io_device_register(&g_iobuf, iobuf_channel_create_cb, iobuf_channel_destroy_cb,
148 : sizeof(struct iobuf_channel), "iobuf");
149 :
150 65 : return 0;
151 0 : error:
152 0 : spdk_free(g_iobuf.small_pool_base);
153 0 : spdk_ring_free(g_iobuf.small_pool);
154 0 : spdk_free(g_iobuf.large_pool_base);
155 0 : spdk_ring_free(g_iobuf.large_pool);
156 :
157 0 : return rc;
158 : }
159 :
/*
 * io_device unregister callback: runs once every iobuf channel has been
 * released. Frees the registered-module list and the global pools, then
 * invokes the user's finish callback set by spdk_iobuf_finish().
 */
static void
iobuf_unregister_cb(void *io_device)
{
	struct iobuf_module *module;

	/* Drain and free the registered module list. */
	while (!TAILQ_EMPTY(&g_iobuf.modules)) {
		module = TAILQ_FIRST(&g_iobuf.modules);
		TAILQ_REMOVE(&g_iobuf.modules, module, tailq);
		free(module->name);
		free(module);
	}

	/* Every buffer should have been returned by now; a short count here
	 * indicates a leak in one of the consuming modules. */
	if (spdk_ring_count(g_iobuf.small_pool) != g_iobuf.opts.small_pool_count) {
		SPDK_ERRLOG("small iobuf pool count is %zu, expected %"PRIu64"\n",
			    spdk_ring_count(g_iobuf.small_pool), g_iobuf.opts.small_pool_count);
	}

	if (spdk_ring_count(g_iobuf.large_pool) != g_iobuf.opts.large_pool_count) {
		SPDK_ERRLOG("large iobuf pool count is %zu, expected %"PRIu64"\n",
			    spdk_ring_count(g_iobuf.large_pool), g_iobuf.opts.large_pool_count);
	}

	/* Release the pools and clear the global pointers. */
	spdk_free(g_iobuf.small_pool_base);
	g_iobuf.small_pool_base = NULL;
	spdk_ring_free(g_iobuf.small_pool);
	g_iobuf.small_pool = NULL;

	spdk_free(g_iobuf.large_pool_base);
	g_iobuf.large_pool_base = NULL;
	spdk_ring_free(g_iobuf.large_pool);
	g_iobuf.large_pool = NULL;

	if (g_iobuf.finish_cb != NULL) {
		g_iobuf.finish_cb(g_iobuf.finish_arg);
	}
}
196 :
197 : void
198 65 : spdk_iobuf_finish(spdk_iobuf_finish_cb cb_fn, void *cb_arg)
199 : {
200 65 : g_iobuf.finish_cb = cb_fn;
201 65 : g_iobuf.finish_arg = cb_arg;
202 :
203 65 : spdk_io_device_unregister(&g_iobuf, iobuf_unregister_cb);
204 65 : }
205 :
206 : int
207 0 : spdk_iobuf_set_opts(const struct spdk_iobuf_opts *opts)
208 : {
209 0 : if (opts->small_pool_count < IOBUF_MIN_SMALL_POOL_SIZE) {
210 0 : SPDK_ERRLOG("small_pool_count must be at least %" PRIu32 "\n",
211 : IOBUF_MIN_SMALL_POOL_SIZE);
212 0 : return -EINVAL;
213 : }
214 0 : if (opts->large_pool_count < IOBUF_MIN_LARGE_POOL_SIZE) {
215 0 : SPDK_ERRLOG("large_pool_count must be at least %" PRIu32 "\n",
216 : IOBUF_MIN_LARGE_POOL_SIZE);
217 0 : return -EINVAL;
218 : }
219 :
220 0 : g_iobuf.opts = *opts;
221 :
222 0 : if (opts->small_bufsize < IOBUF_MIN_SMALL_BUFSIZE) {
223 0 : SPDK_ERRLOG("small_bufsize must be at least %" PRIu32 ". Automatically increasing.\n",
224 : IOBUF_MIN_SMALL_BUFSIZE);
225 0 : g_iobuf.opts.small_bufsize = IOBUF_MIN_SMALL_BUFSIZE;
226 : }
227 :
228 0 : if (opts->large_bufsize < IOBUF_MIN_LARGE_BUFSIZE) {
229 0 : SPDK_WARNLOG("large_bufsize must be at least %" PRIu32 ". Automatically increasing.\n",
230 : IOBUF_MIN_LARGE_BUFSIZE);
231 0 : g_iobuf.opts.large_bufsize = IOBUF_MIN_LARGE_BUFSIZE;
232 : }
233 :
234 0 : return 0;
235 : }
236 :
237 : void
238 124 : spdk_iobuf_get_opts(struct spdk_iobuf_opts *opts)
239 : {
240 124 : *opts = g_iobuf.opts;
241 124 : }
242 :
243 : int
244 82 : spdk_iobuf_channel_init(struct spdk_iobuf_channel *ch, const char *name,
245 : uint32_t small_cache_size, uint32_t large_cache_size)
246 : {
247 : struct spdk_io_channel *ioch;
248 : struct iobuf_channel *iobuf_ch;
249 : struct iobuf_module *module;
250 82 : struct spdk_iobuf_buffer *buf;
251 : uint32_t i;
252 :
253 87 : TAILQ_FOREACH(module, &g_iobuf.modules, tailq) {
254 87 : if (strcmp(name, module->name) == 0) {
255 82 : break;
256 : }
257 : }
258 :
259 82 : if (module == NULL) {
260 0 : SPDK_ERRLOG("Couldn't find iobuf module: '%s'\n", name);
261 0 : return -ENODEV;
262 : }
263 :
264 82 : ioch = spdk_get_io_channel(&g_iobuf);
265 82 : if (ioch == NULL) {
266 0 : SPDK_ERRLOG("Couldn't get iobuf IO channel\n");
267 0 : return -ENOMEM;
268 : }
269 :
270 82 : iobuf_ch = spdk_io_channel_get_ctx(ioch);
271 :
272 87 : for (i = 0; i < IOBUF_MAX_CHANNELS; ++i) {
273 87 : if (iobuf_ch->channels[i] == NULL) {
274 82 : iobuf_ch->channels[i] = ch;
275 82 : break;
276 : }
277 : }
278 :
279 82 : if (i == IOBUF_MAX_CHANNELS) {
280 0 : SPDK_ERRLOG("Max number of iobuf channels (%" PRIu32 ") exceeded.\n", i);
281 0 : goto error;
282 : }
283 :
284 82 : ch->small.queue = &iobuf_ch->small_queue;
285 82 : ch->large.queue = &iobuf_ch->large_queue;
286 82 : ch->small.pool = g_iobuf.small_pool;
287 82 : ch->large.pool = g_iobuf.large_pool;
288 82 : ch->small.bufsize = g_iobuf.opts.small_bufsize;
289 82 : ch->large.bufsize = g_iobuf.opts.large_bufsize;
290 82 : ch->parent = ioch;
291 82 : ch->module = module;
292 82 : ch->small.cache_size = small_cache_size;
293 82 : ch->large.cache_size = large_cache_size;
294 82 : ch->small.cache_count = 0;
295 82 : ch->large.cache_count = 0;
296 :
297 82 : STAILQ_INIT(&ch->small.cache);
298 82 : STAILQ_INIT(&ch->large.cache);
299 :
300 9058 : for (i = 0; i < small_cache_size; ++i) {
301 8978 : if (spdk_ring_dequeue(g_iobuf.small_pool, (void **)&buf, 1) == 0) {
302 2 : SPDK_ERRLOG("Failed to populate iobuf small buffer cache. "
303 : "You may need to increase spdk_iobuf_opts.small_pool_count (%"PRIu64")\n",
304 : g_iobuf.opts.small_pool_count);
305 2 : SPDK_ERRLOG("See scripts/calc-iobuf.py for guidance on how to calculate "
306 : "this value.\n");
307 2 : goto error;
308 : }
309 8976 : STAILQ_INSERT_TAIL(&ch->small.cache, buf, stailq);
310 8976 : ch->small.cache_count++;
311 : }
312 1215 : for (i = 0; i < large_cache_size; ++i) {
313 1136 : if (spdk_ring_dequeue(g_iobuf.large_pool, (void **)&buf, 1) == 0) {
314 1 : SPDK_ERRLOG("Failed to populate iobuf large buffer cache. "
315 : "You may need to increase spdk_iobuf_opts.large_pool_count (%"PRIu64")\n",
316 : g_iobuf.opts.large_pool_count);
317 1 : SPDK_ERRLOG("See scripts/calc-iobuf.py for guidance on how to calculate "
318 : "this value.\n");
319 1 : goto error;
320 : }
321 1135 : STAILQ_INSERT_TAIL(&ch->large.cache, buf, stailq);
322 1135 : ch->large.cache_count++;
323 : }
324 :
325 79 : return 0;
326 3 : error:
327 3 : spdk_iobuf_channel_fini(ch);
328 :
329 3 : return -ENOMEM;
330 : }
331 :
/*
 * Tear down an iobuf channel: return all cached buffers to the shared
 * pools, clear this channel's slot in the per-thread table, and drop the io
 * channel reference. The owning module must have no entries left on the
 * wait queues.
 */
void
spdk_iobuf_channel_fini(struct spdk_iobuf_channel *ch)
{
	/* Only referenced by asserts below; unused when NDEBUG is set. */
	struct spdk_iobuf_entry *entry __attribute__((unused));
	struct spdk_iobuf_buffer *buf;
	struct iobuf_channel *iobuf_ch;
	uint32_t i;

	/* Make sure none of the wait queue entries are coming from this module */
	STAILQ_FOREACH(entry, ch->small.queue, stailq) {
		assert(entry->module != ch->module);
	}
	STAILQ_FOREACH(entry, ch->large.queue, stailq) {
		assert(entry->module != ch->module);
	}

	/* Release cached buffers back to the pool */
	while (!STAILQ_EMPTY(&ch->small.cache)) {
		buf = STAILQ_FIRST(&ch->small.cache);
		STAILQ_REMOVE_HEAD(&ch->small.cache, stailq);
		spdk_ring_enqueue(g_iobuf.small_pool, (void **)&buf, 1, NULL);
		ch->small.cache_count--;
	}
	while (!STAILQ_EMPTY(&ch->large.cache)) {
		buf = STAILQ_FIRST(&ch->large.cache);
		STAILQ_REMOVE_HEAD(&ch->large.cache, stailq);
		spdk_ring_enqueue(g_iobuf.large_pool, (void **)&buf, 1, NULL);
		ch->large.cache_count--;
	}

	assert(ch->small.cache_count == 0);
	assert(ch->large.cache_count == 0);

	/* Remove this channel from the per-thread lookup table. */
	iobuf_ch = spdk_io_channel_get_ctx(ch->parent);
	for (i = 0; i < IOBUF_MAX_CHANNELS; ++i) {
		if (iobuf_ch->channels[i] == ch) {
			iobuf_ch->channels[i] = NULL;
			break;
		}
	}

	spdk_put_io_channel(ch->parent);
	ch->parent = NULL;
}
376 :
377 : int
378 68 : spdk_iobuf_register_module(const char *name)
379 : {
380 : struct iobuf_module *module;
381 :
382 70 : TAILQ_FOREACH(module, &g_iobuf.modules, tailq) {
383 2 : if (strcmp(name, module->name) == 0) {
384 0 : return -EEXIST;
385 : }
386 : }
387 :
388 68 : module = calloc(1, sizeof(*module));
389 68 : if (module == NULL) {
390 0 : return -ENOMEM;
391 : }
392 :
393 68 : module->name = strdup(name);
394 68 : if (module->name == NULL) {
395 0 : free(module);
396 0 : return -ENOMEM;
397 : }
398 :
399 68 : TAILQ_INSERT_TAIL(&g_iobuf.modules, module, tailq);
400 :
401 68 : return 0;
402 : }
403 :
404 : int
405 0 : spdk_iobuf_unregister_module(const char *name)
406 : {
407 : struct iobuf_module *module;
408 :
409 0 : TAILQ_FOREACH(module, &g_iobuf.modules, tailq) {
410 0 : if (strcmp(name, module->name) == 0) {
411 0 : TAILQ_REMOVE(&g_iobuf.modules, module, tailq);
412 0 : free(module->name);
413 0 : free(module);
414 0 : return 0;
415 : }
416 : }
417 :
418 0 : return -ENOENT;
419 : }
420 :
421 : int
422 218 : spdk_iobuf_for_each_entry(struct spdk_iobuf_channel *ch, struct spdk_iobuf_pool *pool,
423 : spdk_iobuf_for_each_entry_fn cb_fn, void *cb_ctx)
424 : {
425 : struct spdk_iobuf_entry *entry, *tmp;
426 : int rc;
427 :
428 234 : STAILQ_FOREACH_SAFE(entry, pool->queue, stailq, tmp) {
429 : /* We only want to iterate over the entries requested by the module which owns ch */
430 16 : if (entry->module != ch->module) {
431 8 : continue;
432 : }
433 :
434 8 : rc = cb_fn(ch, entry, cb_ctx);
435 8 : if (rc != 0) {
436 0 : return rc;
437 : }
438 : }
439 :
440 218 : return 0;
441 : }
442 :
443 : void
444 12 : spdk_iobuf_entry_abort(struct spdk_iobuf_channel *ch, struct spdk_iobuf_entry *entry,
445 : uint64_t len)
446 : {
447 : struct spdk_iobuf_pool *pool;
448 :
449 12 : if (len <= ch->small.bufsize) {
450 6 : pool = &ch->small;
451 : } else {
452 6 : assert(len <= ch->large.bufsize);
453 6 : pool = &ch->large;
454 : }
455 :
456 12 : STAILQ_REMOVE(pool->queue, entry, spdk_iobuf_entry, stailq);
457 12 : }
458 :
459 : #define IOBUF_BATCH_SIZE 32
460 :
/*
 * Acquire a buffer large enough to hold len bytes. The per-channel cache is
 * tried first; on a miss, a batch of buffers is dequeued from the shared
 * pool (amortizing ring overhead) and all but one are cached. If the pool
 * is exhausted and entry is non-NULL, the request is parked on the wait
 * queue and cb_fn is invoked later with a buffer; otherwise NULL is
 * returned. Must be called from the thread that initialized ch.
 */
void *
spdk_iobuf_get(struct spdk_iobuf_channel *ch, uint64_t len,
	       struct spdk_iobuf_entry *entry, spdk_iobuf_get_cb cb_fn)
{
	struct spdk_iobuf_pool *pool;
	void *buf;

	assert(spdk_io_channel_get_thread(ch->parent) == spdk_get_thread());
	/* Select the small or large pool based on the requested length. */
	if (len <= ch->small.bufsize) {
		pool = &ch->small;
	} else {
		assert(len <= ch->large.bufsize);
		pool = &ch->large;
	}

	buf = (void *)STAILQ_FIRST(&pool->cache);
	if (buf) {
		/* Fast path: serve directly from the per-channel cache. */
		STAILQ_REMOVE_HEAD(&pool->cache, stailq);
		assert(pool->cache_count > 0);
		pool->cache_count--;
		pool->stats.cache++;
	} else {
		struct spdk_iobuf_buffer *bufs[IOBUF_BATCH_SIZE];
		size_t sz, i;

		/* If we're going to dequeue, we may as well dequeue a batch. */
		sz = spdk_ring_dequeue(pool->pool, (void **)bufs, spdk_min(IOBUF_BATCH_SIZE,
				       spdk_max(pool->cache_size, 1)));
		if (sz == 0) {
			if (entry) {
				/* Pool exhausted: queue the request; spdk_iobuf_put()
				 * will complete it once a buffer is returned. */
				STAILQ_INSERT_TAIL(pool->queue, entry, stailq);
				entry->module = ch->module;
				entry->cb_fn = cb_fn;
				pool->stats.retry++;
			}

			return NULL;
		}

		pool->stats.main++;
		/* Stash all but one buffer from the batch in the cache. */
		for (i = 0; i < (sz - 1); i++) {
			STAILQ_INSERT_HEAD(&pool->cache, bufs[i], stailq);
			pool->cache_count++;
		}

		/* The last one is the one we'll return */
		buf = bufs[i];
	}

	return (char *)buf;
}
512 :
/*
 * Return a buffer obtained with spdk_iobuf_get(). If a request is waiting
 * on the corresponding pool, the buffer is handed straight to the first
 * waiter. Otherwise it is cached on the channel (or pushed directly back to
 * the shared pool when caching is disabled); once the cache grows a full
 * batch beyond its configured size, a batch is flushed back to the shared
 * pool. Must be called from the thread that initialized ch.
 */
void
spdk_iobuf_put(struct spdk_iobuf_channel *ch, void *buf, uint64_t len)
{
	struct spdk_iobuf_entry *entry;
	struct spdk_iobuf_buffer *iobuf_buf;
	struct spdk_iobuf_pool *pool;
	size_t sz;

	assert(spdk_io_channel_get_thread(ch->parent) == spdk_get_thread());
	/* len selects the pool; it must match the original spdk_iobuf_get(). */
	if (len <= ch->small.bufsize) {
		pool = &ch->small;
	} else {
		pool = &ch->large;
	}

	if (STAILQ_EMPTY(pool->queue)) {
		if (pool->cache_size == 0) {
			/* Caching disabled: go straight back to the shared pool. */
			spdk_ring_enqueue(pool->pool, (void **)&buf, 1, NULL);
			return;
		}

		iobuf_buf = (struct spdk_iobuf_buffer *)buf;

		STAILQ_INSERT_HEAD(&pool->cache, iobuf_buf, stailq);
		pool->cache_count++;

		/* The cache size may exceed the configured amount. We always dequeue from the
		 * central pool in batches of known size, so wait until at least a batch
		 * has been returned to actually return the buffers to the central pool. */
		sz = spdk_min(IOBUF_BATCH_SIZE, pool->cache_size);
		if (pool->cache_count >= pool->cache_size + sz) {
			struct spdk_iobuf_buffer *bufs[IOBUF_BATCH_SIZE];
			size_t i;

			for (i = 0; i < sz; i++) {
				bufs[i] = STAILQ_FIRST(&pool->cache);
				STAILQ_REMOVE_HEAD(&pool->cache, stailq);
				assert(pool->cache_count > 0);
				pool->cache_count--;
			}

			spdk_ring_enqueue(pool->pool, (void **)bufs, sz, NULL);
		}
	} else {
		/* Hand the buffer directly to the first waiter. */
		entry = STAILQ_FIRST(pool->queue);
		STAILQ_REMOVE_HEAD(pool->queue, stailq);
		entry->cb_fn(entry, buf);
	}
}
562 :
563 : static void
564 0 : iobuf_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
565 : {
566 0 : struct iobuf_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
567 :
568 0 : ctx->cb_fn(ctx->modules, ctx->num_modules, ctx->cb_arg);
569 0 : free(ctx->modules);
570 0 : free(ctx);
571 0 : }
572 :
/*
 * Per-thread step of spdk_iobuf_get_stats(): fold the small/large pool
 * counters of the spdk_iobuf_channels on this thread into the per-module
 * totals in ctx, then continue the iteration.
 */
static void
iobuf_get_channel_stats(struct spdk_io_channel_iter *iter)
{
	struct iobuf_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
	struct iobuf_channel *iobuf_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_iobuf_channel *channel;
	struct iobuf_module *module;
	struct spdk_iobuf_module_stats *it;
	uint32_t i, j;

	for (i = 0; i < ctx->num_modules; ++i) {
		for (j = 0; j < IOBUF_MAX_CHANNELS; ++j) {
			channel = iobuf_ch->channels[j];
			if (channel == NULL) {
				continue;
			}

			it = &ctx->modules[i];
			module = (struct iobuf_module *)channel->module;
			if (strcmp(it->module, module->name) == 0) {
				it->small_pool.cache += channel->small.stats.cache;
				it->small_pool.main += channel->small.stats.main;
				it->small_pool.retry += channel->small.stats.retry;
				it->large_pool.cache += channel->large.stats.cache;
				it->large_pool.main += channel->large.stats.main;
				it->large_pool.retry += channel->large.stats.retry;
				/* NOTE(review): this break counts only the first channel
				 * matching the module on this thread - assumes at most one
				 * channel per module per thread; confirm with callers. */
				break;
			}
		}
	}

	spdk_for_each_channel_continue(iter, 0);
}
607 :
608 : int
609 0 : spdk_iobuf_get_stats(spdk_iobuf_get_stats_cb cb_fn, void *cb_arg)
610 : {
611 : struct iobuf_module *module;
612 : struct iobuf_get_stats_ctx *ctx;
613 : uint32_t i;
614 :
615 0 : ctx = calloc(1, sizeof(*ctx));
616 0 : if (ctx == NULL) {
617 0 : return -ENOMEM;
618 : }
619 :
620 0 : TAILQ_FOREACH(module, &g_iobuf.modules, tailq) {
621 0 : ++ctx->num_modules;
622 : }
623 :
624 0 : ctx->modules = calloc(ctx->num_modules, sizeof(struct spdk_iobuf_module_stats));
625 0 : if (ctx->modules == NULL) {
626 0 : free(ctx);
627 0 : return -ENOMEM;
628 : }
629 :
630 0 : i = 0;
631 0 : TAILQ_FOREACH(module, &g_iobuf.modules, tailq) {
632 0 : ctx->modules[i].module = module->name;
633 0 : ++i;
634 : }
635 :
636 0 : ctx->cb_fn = cb_fn;
637 0 : ctx->cb_arg = cb_arg;
638 :
639 0 : spdk_for_each_channel(&g_iobuf, iobuf_get_channel_stats, ctx,
640 : iobuf_get_channel_stats_done);
641 0 : return 0;
642 : }
|