Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2016 Intel Corporation.
3 : * All rights reserved.
4 : * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "spdk/stdinc.h"
8 :
9 : #include "spdk/env.h"
10 : #include "spdk/likely.h"
11 : #include "spdk/queue.h"
12 : #include "spdk/string.h"
13 : #include "spdk/thread.h"
14 : #include "spdk/trace.h"
15 : #include "spdk/util.h"
16 : #include "spdk/fd_group.h"
17 :
18 : #include "spdk/log.h"
19 : #include "spdk_internal/thread.h"
20 : #include "spdk_internal/usdt.h"
21 : #include "thread_internal.h"
22 :
23 : #include "spdk_internal/trace_defs.h"
24 :
25 : #ifdef __linux__
26 : #include <sys/timerfd.h>
27 : #include <sys/eventfd.h>
28 : #endif
29 :
30 : #ifdef SPDK_HAVE_EXECINFO_H
31 : #include <execinfo.h>
32 : #endif
33 :
34 : #define SPDK_MSG_BATCH_SIZE 8
35 : #define SPDK_MAX_DEVICE_NAME_LEN 256
36 : #define SPDK_THREAD_EXIT_TIMEOUT_SEC 5
37 : #define SPDK_MAX_POLLER_NAME_LEN 256
38 : #define SPDK_MAX_THREAD_NAME_LEN 256
39 :
40 : static struct spdk_thread *g_app_thread;
41 :
42 : struct spdk_interrupt {
43 : int efd;
44 : struct spdk_thread *thread;
45 : spdk_interrupt_fn fn;
46 : void *arg;
47 : char name[SPDK_MAX_POLLER_NAME_LEN + 1];
48 : };
49 :
50 : enum spdk_poller_state {
51 : /* The poller is registered with a thread but not currently executing its fn. */
52 : SPDK_POLLER_STATE_WAITING,
53 :
54 : /* The poller is currently running its fn. */
55 : SPDK_POLLER_STATE_RUNNING,
56 :
57 : /* The poller was unregistered during the execution of its fn. */
58 : SPDK_POLLER_STATE_UNREGISTERED,
59 :
60 : /* The poller is in the process of being paused. It will be paused
61 : * during the next time it's supposed to be executed.
62 : */
63 : SPDK_POLLER_STATE_PAUSING,
64 :
65 : /* The poller is registered but currently paused. It's on the
66 : * paused_pollers list.
67 : */
68 : SPDK_POLLER_STATE_PAUSED,
69 : };
70 :
71 : struct spdk_poller {
72 : TAILQ_ENTRY(spdk_poller) tailq;
73 : RB_ENTRY(spdk_poller) node;
74 :
75 : /* Current state of the poller; should only be accessed from the poller's thread. */
76 : enum spdk_poller_state state;
77 :
78 : uint64_t period_ticks;
79 : uint64_t next_run_tick;
80 : uint64_t run_count;
81 : uint64_t busy_count;
82 : uint64_t id;
83 : spdk_poller_fn fn;
84 : void *arg;
85 : struct spdk_thread *thread;
86 : struct spdk_interrupt *intr;
87 : spdk_poller_set_interrupt_mode_cb set_intr_cb_fn;
88 : void *set_intr_cb_arg;
89 :
90 : char name[SPDK_MAX_POLLER_NAME_LEN + 1];
91 : };
92 :
93 : enum spdk_thread_state {
94 : /* The thread is processing pollers and messages via spdk_thread_poll(). */
95 : SPDK_THREAD_STATE_RUNNING,
96 :
97 : /* The thread is in the process of termination. It reaps unregistering
98 : * pollers and releases I/O channels.
99 : */
100 : SPDK_THREAD_STATE_EXITING,
101 :
102 : /* The thread has exited. It is ready for spdk_thread_destroy(). */
103 : SPDK_THREAD_STATE_EXITED,
104 : };
105 :
106 : struct spdk_thread {
107 : uint64_t tsc_last;
108 : struct spdk_thread_stats stats;
109 : /*
110 : * Contains pollers actively running on this thread. Pollers
111 : * are run round-robin. The thread takes one poller from the head
112 : * of the ring, executes it, then puts it back at the tail of
113 : * the ring.
114 : */
115 : TAILQ_HEAD(active_pollers_head, spdk_poller) active_pollers;
116 : /*
117 : * Contains pollers running on this thread with a periodic timer.
118 : */
119 : RB_HEAD(timed_pollers_tree, spdk_poller) timed_pollers;
120 : struct spdk_poller *first_timed_poller;
121 : /*
122 : * Contains paused pollers. Pollers on this queue are waiting until
123 : * they are resumed (in which case they're put onto the active/timer
124 : * queues) or unregistered.
125 : */
126 : TAILQ_HEAD(paused_pollers_head, spdk_poller) paused_pollers;
127 : struct spdk_ring *messages;
128 : int msg_fd;
129 : SLIST_HEAD(, spdk_msg) msg_cache;
130 : size_t msg_cache_count;
131 : spdk_msg_fn critical_msg;
132 : uint64_t id;
133 : uint64_t next_poller_id;
134 : enum spdk_thread_state state;
135 : int pending_unregister_count;
136 : uint32_t for_each_count;
137 :
138 : RB_HEAD(io_channel_tree, spdk_io_channel) io_channels;
139 : TAILQ_ENTRY(spdk_thread) tailq;
140 :
141 : char name[SPDK_MAX_THREAD_NAME_LEN + 1];
142 : struct spdk_cpuset cpumask;
143 : uint64_t exit_timeout_tsc;
144 :
145 : int32_t lock_count;
146 :
147 : /* spdk_thread is bound to current CPU core. */
148 : bool is_bound;
149 :
150 : /* Indicates whether this spdk_thread currently runs in interrupt. */
151 : bool in_interrupt;
152 : bool poller_unregistered;
153 : struct spdk_fd_group *fgrp;
154 :
155 : /* User context allocated at the end */
156 : uint8_t ctx[0];
157 : };
158 :
159 : static pthread_mutex_t g_devlist_mutex = PTHREAD_MUTEX_INITIALIZER;
160 :
161 : static spdk_new_thread_fn g_new_thread_fn = NULL;
162 : static spdk_thread_op_fn g_thread_op_fn = NULL;
163 : static spdk_thread_op_supported_fn g_thread_op_supported_fn;
164 : static size_t g_ctx_sz = 0;
165 : /* A monotonically increasing ID is assigned to each created thread, beginning at 1.
166 : * Once the ID wraps around to 0, further thread creation is not allowed and the
167 : * SPDK application must be restarted.
168 : */
169 : static uint64_t g_thread_id = 1;
170 :
171 : enum spin_error {
172 : SPIN_ERR_NONE,
173 : /* Trying to use an SPDK lock while not on an SPDK thread */
174 : SPIN_ERR_NOT_SPDK_THREAD,
175 : /* Trying to lock a lock already held by this SPDK thread */
176 : SPIN_ERR_DEADLOCK,
177 : /* Trying to unlock a lock not held by this SPDK thread */
178 : SPIN_ERR_WRONG_THREAD,
179 : /* pthread_spin_*() returned an error */
180 : SPIN_ERR_PTHREAD,
181 : /* Trying to destroy a lock that is held */
182 : SPIN_ERR_LOCK_HELD,
183 : /* lock_count is invalid */
184 : SPIN_ERR_LOCK_COUNT,
185 : /*
186 : * An spdk_thread may migrate to another pthread. A spinlock held across migration leads to
187 : * undefined behavior. A spinlock held when an SPDK thread goes off CPU would lead to
188 : * deadlock when another SPDK thread on the same pthread tries to take that lock.
189 : */
190 : SPIN_ERR_HOLD_DURING_SWITCH,
191 : /* Trying to use a lock that was destroyed (but not re-initialized) */
192 : SPIN_ERR_DESTROYED,
193 : /* Trying to use a lock that is not initialized */
194 : SPIN_ERR_NOT_INITIALIZED,
195 :
196 : /* Must be last, not an actual error code */
197 : SPIN_ERR_LAST
198 : };
199 :
200 : static const char *spin_error_strings[] = {
201 : [SPIN_ERR_NONE] = "No error",
202 : [SPIN_ERR_NOT_SPDK_THREAD] = "Not an SPDK thread",
203 : [SPIN_ERR_DEADLOCK] = "Deadlock detected",
204 : [SPIN_ERR_WRONG_THREAD] = "Unlock on wrong SPDK thread",
205 : [SPIN_ERR_PTHREAD] = "Error from pthread_spinlock",
206 : [SPIN_ERR_LOCK_HELD] = "Destroying a held spinlock",
207 : [SPIN_ERR_LOCK_COUNT] = "Lock count is invalid",
208 : [SPIN_ERR_HOLD_DURING_SWITCH] = "Lock(s) held while SPDK thread going off CPU",
209 : [SPIN_ERR_DESTROYED] = "Lock has been destroyed",
210 : [SPIN_ERR_NOT_INITIALIZED] = "Lock has not been initialized",
211 : };
212 :
213 : #define SPIN_ERROR_STRING(err) (err < 0 || err >= SPDK_COUNTOF(spin_error_strings)) \
214 : ? "Unknown error" : spin_error_strings[err]
215 :
216 : static void
217 0 : __posix_abort(enum spin_error err)
218 : {
219 0 : abort();
220 : }
221 :
222 : typedef void (*spin_abort)(enum spin_error err);
223 : spin_abort g_spin_abort_fn = __posix_abort;
224 :
225 : #define SPIN_ASSERT_IMPL(cond, err, extra_log, ret) \
226 : do { \
227 : if (spdk_unlikely(!(cond))) { \
228 : SPDK_ERRLOG("unrecoverable spinlock error %d: %s (%s)\n", err, \
229 : SPIN_ERROR_STRING(err), #cond); \
230 : extra_log; \
231 : g_spin_abort_fn(err); \
232 : ret; \
233 : } \
234 : } while (0)
235 : #define SPIN_ASSERT_LOG_STACKS(cond, err, sspin) \
236 : SPIN_ASSERT_IMPL(cond, err, sspin_stacks_print(sspin), return)
237 : #define SPIN_ASSERT_RETURN(cond, err, ret) SPIN_ASSERT_IMPL(cond, err, , return ret)
238 : #define SPIN_ASSERT(cond, err) SPIN_ASSERT_IMPL(cond, err, ,)
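/*
 * Editorial sketch: g_spin_abort_fn above is a replaceable handler, so a
 * test harness could record spinlock violations instead of aborting the
 * process. record_spin_error() is hypothetical; note that if the handler
 * returns, only the *_RETURN and *_LOG_STACKS variants stop execution early.
 */
static enum spin_error g_last_spin_error = SPIN_ERR_NONE;

static void
record_spin_error(enum spin_error err)
{
	g_last_spin_error = err;
}

/* In a test setup: g_spin_abort_fn = record_spin_error; */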
239 :
240 : struct io_device {
241 : void *io_device;
242 : char name[SPDK_MAX_DEVICE_NAME_LEN + 1];
243 : spdk_io_channel_create_cb create_cb;
244 : spdk_io_channel_destroy_cb destroy_cb;
245 : spdk_io_device_unregister_cb unregister_cb;
246 : struct spdk_thread *unregister_thread;
247 : uint32_t ctx_size;
248 : uint32_t for_each_count;
249 : RB_ENTRY(io_device) node;
250 :
251 : uint32_t refcnt;
252 :
253 : bool pending_unregister;
254 : bool unregistered;
255 : };
256 :
257 : static RB_HEAD(io_device_tree, io_device) g_io_devices = RB_INITIALIZER(g_io_devices);
258 :
259 : static int
260 12231 : io_device_cmp(struct io_device *dev1, struct io_device *dev2)
261 : {
262 12231 : return (dev1->io_device < dev2->io_device ? -1 : dev1->io_device > dev2->io_device);
263 : }
264 :
265 23065 : RB_GENERATE_STATIC(io_device_tree, io_device, node, io_device_cmp);
266 :
267 : static int
268 8088 : io_channel_cmp(struct spdk_io_channel *ch1, struct spdk_io_channel *ch2)
269 : {
270 8088 : return (ch1->dev < ch2->dev ? -1 : ch1->dev > ch2->dev);
271 : }
272 :
273 23359 : RB_GENERATE_STATIC(io_channel_tree, spdk_io_channel, node, io_channel_cmp);
274 :
275 : struct spdk_msg {
276 : spdk_msg_fn fn;
277 : void *arg;
278 :
279 : SLIST_ENTRY(spdk_msg) link;
280 : };
281 :
282 : static struct spdk_mempool *g_spdk_msg_mempool = NULL;
283 :
284 : static TAILQ_HEAD(, spdk_thread) g_threads = TAILQ_HEAD_INITIALIZER(g_threads);
285 : static uint32_t g_thread_count = 0;
286 :
287 : static __thread struct spdk_thread *tls_thread = NULL;
288 :
289 : static void
290 0 : thread_trace(void)
291 : {
292 0 : spdk_trace_register_description("THREAD_IOCH_GET",
293 : TRACE_THREAD_IOCH_GET,
294 : OWNER_NONE, OBJECT_NONE, 0,
295 : SPDK_TRACE_ARG_TYPE_INT, "refcnt");
296 0 : spdk_trace_register_description("THREAD_IOCH_PUT",
297 : TRACE_THREAD_IOCH_PUT,
298 : OWNER_NONE, OBJECT_NONE, 0,
299 : SPDK_TRACE_ARG_TYPE_INT, "refcnt");
300 0 : }
301 41 : SPDK_TRACE_REGISTER_FN(thread_trace, "thread", TRACE_GROUP_THREAD)
302 :
303 : /*
304 : * If this compare function returned zero when two next_run_ticks were equal,
305 : * the macro RB_INSERT() would refuse the insertion and instead return a pointer
306 : * to the existing element with the same next_run_tick.
307 : *
308 : * Fortunately, the macro RB_REMOVE() takes a pointer to the element to remove
309 : * as a parameter, not a key.
310 : *
311 : * Hence we let RB_INSERT() place elements with equal keys on the right side
312 : * by returning 1 when two next_run_ticks are equal.
313 : */
314 : static inline int
315 580 : timed_poller_compare(struct spdk_poller *poller1, struct spdk_poller *poller2)
316 : {
317 580 : if (poller1->next_run_tick < poller2->next_run_tick) {
318 261 : return -1;
319 : } else {
320 319 : return 1;
321 : }
322 580 : }
323 :
324 4775 : RB_GENERATE_STATIC(timed_pollers_tree, spdk_poller, node, timed_poller_compare);
325 :
326 : static inline struct spdk_thread *
327 696121 : _get_thread(void)
328 : {
329 696121 : return tls_thread;
330 : }
331 :
332 : static int
333 84 : _thread_lib_init(size_t ctx_sz, size_t msg_mempool_sz)
334 : {
335 84 : char mempool_name[SPDK_MAX_MEMZONE_NAME_LEN];
336 :
337 84 : g_ctx_sz = ctx_sz;
338 :
339 84 : snprintf(mempool_name, sizeof(mempool_name), "msgpool_%d", getpid());
340 84 : g_spdk_msg_mempool = spdk_mempool_create(mempool_name, msg_mempool_sz,
341 : sizeof(struct spdk_msg),
342 : 0, /* No cache. We do our own. */
343 : SPDK_ENV_SOCKET_ID_ANY);
344 :
345 84 : SPDK_DEBUGLOG(thread, "spdk_msg_mempool was created with size: %zu\n",
346 : msg_mempool_sz);
347 :
348 84 : if (!g_spdk_msg_mempool) {
349 0 : SPDK_ERRLOG("spdk_msg_mempool creation failed\n");
350 0 : return -ENOMEM;
351 : }
352 :
353 84 : return 0;
354 84 : }
355 :
356 : static void thread_interrupt_destroy(struct spdk_thread *thread);
357 : static int thread_interrupt_create(struct spdk_thread *thread);
358 :
359 : static void
360 165 : _free_thread(struct spdk_thread *thread)
361 : {
362 165 : struct spdk_io_channel *ch;
363 165 : struct spdk_msg *msg;
364 165 : struct spdk_poller *poller, *ptmp;
365 :
366 165 : RB_FOREACH(ch, io_channel_tree, &thread->io_channels) {
367 0 : SPDK_ERRLOG("thread %s still has channel for io_device %s\n",
368 : thread->name, ch->dev->name);
369 0 : }
370 :
371 165 : TAILQ_FOREACH_SAFE(poller, &thread->active_pollers, tailq, ptmp) {
372 0 : if (poller->state != SPDK_POLLER_STATE_UNREGISTERED) {
373 0 : SPDK_WARNLOG("active_poller %s still registered at thread exit\n",
374 : poller->name);
375 0 : }
376 0 : TAILQ_REMOVE(&thread->active_pollers, poller, tailq);
377 0 : free(poller);
378 0 : }
379 :
380 187 : RB_FOREACH_SAFE(poller, timed_pollers_tree, &thread->timed_pollers, ptmp) {
381 22 : if (poller->state != SPDK_POLLER_STATE_UNREGISTERED) {
382 0 : SPDK_WARNLOG("timed_poller %s still registered at thread exit\n",
383 : poller->name);
384 0 : }
385 22 : RB_REMOVE(timed_pollers_tree, &thread->timed_pollers, poller);
386 22 : free(poller);
387 22 : }
388 :
389 165 : TAILQ_FOREACH_SAFE(poller, &thread->paused_pollers, tailq, ptmp) {
390 0 : SPDK_WARNLOG("paused_poller %s still registered at thread exit\n", poller->name);
391 0 : TAILQ_REMOVE(&thread->paused_pollers, poller, tailq);
392 0 : free(poller);
393 0 : }
394 :
395 165 : pthread_mutex_lock(&g_devlist_mutex);
396 165 : assert(g_thread_count > 0);
397 165 : g_thread_count--;
398 165 : TAILQ_REMOVE(&g_threads, thread, tailq);
399 165 : pthread_mutex_unlock(&g_devlist_mutex);
400 :
401 165 : msg = SLIST_FIRST(&thread->msg_cache);
402 168972 : while (msg != NULL) {
403 168807 : SLIST_REMOVE_HEAD(&thread->msg_cache, link);
404 :
405 168807 : assert(thread->msg_cache_count > 0);
406 168807 : thread->msg_cache_count--;
407 168807 : spdk_mempool_put(g_spdk_msg_mempool, msg);
408 :
409 168807 : msg = SLIST_FIRST(&thread->msg_cache);
410 : }
411 :
412 165 : assert(thread->msg_cache_count == 0);
413 :
414 165 : if (spdk_interrupt_mode_is_enabled()) {
415 0 : thread_interrupt_destroy(thread);
416 0 : }
417 :
418 165 : spdk_ring_free(thread->messages);
419 165 : free(thread);
420 165 : }
421 :
422 : int
423 74 : spdk_thread_lib_init(spdk_new_thread_fn new_thread_fn, size_t ctx_sz)
424 : {
425 74 : assert(g_new_thread_fn == NULL);
426 74 : assert(g_thread_op_fn == NULL);
427 :
428 74 : if (new_thread_fn == NULL) {
429 73 : SPDK_INFOLOG(thread, "new_thread_fn was not specified at spdk_thread_lib_init\n");
430 73 : } else {
431 1 : g_new_thread_fn = new_thread_fn;
432 : }
433 :
434 74 : return _thread_lib_init(ctx_sz, SPDK_DEFAULT_MSG_MEMPOOL_SIZE);
435 : }
436 :
437 : int
438 10 : spdk_thread_lib_init_ext(spdk_thread_op_fn thread_op_fn,
439 : spdk_thread_op_supported_fn thread_op_supported_fn,
440 : size_t ctx_sz, size_t msg_mempool_sz)
441 : {
442 10 : assert(g_new_thread_fn == NULL);
443 10 : assert(g_thread_op_fn == NULL);
444 10 : assert(g_thread_op_supported_fn == NULL);
445 :
446 10 : if ((thread_op_fn != NULL) != (thread_op_supported_fn != NULL)) {
447 0 : SPDK_ERRLOG("Both must be defined or undefined together.\n");
448 0 : return -EINVAL;
449 : }
450 :
451 10 : if (thread_op_fn == NULL && thread_op_supported_fn == NULL) {
452 0 : SPDK_INFOLOG(thread, "thread_op_fn and thread_op_supported_fn were not specified\n");
453 0 : } else {
454 10 : g_thread_op_fn = thread_op_fn;
455 10 : g_thread_op_supported_fn = thread_op_supported_fn;
456 : }
457 :
458 10 : return _thread_lib_init(ctx_sz, msg_mempool_sz);
459 10 : }
460 :
461 : void
462 83 : spdk_thread_lib_fini(void)
463 : {
464 83 : struct io_device *dev;
465 :
466 84 : RB_FOREACH(dev, io_device_tree, &g_io_devices) {
467 1 : SPDK_ERRLOG("io_device %s not unregistered\n", dev->name);
468 1 : }
469 :
470 83 : g_new_thread_fn = NULL;
471 83 : g_thread_op_fn = NULL;
472 83 : g_thread_op_supported_fn = NULL;
473 83 : g_ctx_sz = 0;
474 83 : if (g_app_thread != NULL) {
475 80 : _free_thread(g_app_thread);
476 80 : g_app_thread = NULL;
477 80 : }
478 :
479 83 : if (g_spdk_msg_mempool) {
480 83 : spdk_mempool_free(g_spdk_msg_mempool);
481 83 : g_spdk_msg_mempool = NULL;
482 83 : }
483 83 : }
484 :
485 : struct spdk_thread *
486 200 : spdk_thread_create(const char *name, const struct spdk_cpuset *cpumask)
487 : {
488 200 : struct spdk_thread *thread, *null_thread;
489 200 : struct spdk_msg *msgs[SPDK_MSG_MEMPOOL_CACHE_SIZE];
490 200 : int rc = 0, i;
491 :
492 200 : thread = calloc(1, sizeof(*thread) + g_ctx_sz);
493 200 : if (!thread) {
494 0 : SPDK_ERRLOG("Unable to allocate memory for thread\n");
495 0 : return NULL;
496 : }
497 :
498 200 : if (cpumask) {
499 22 : spdk_cpuset_copy(&thread->cpumask, cpumask);
500 22 : } else {
501 178 : spdk_cpuset_negate(&thread->cpumask);
502 : }
503 :
504 200 : RB_INIT(&thread->io_channels);
505 200 : TAILQ_INIT(&thread->active_pollers);
506 200 : RB_INIT(&thread->timed_pollers);
507 200 : TAILQ_INIT(&thread->paused_pollers);
508 200 : SLIST_INIT(&thread->msg_cache);
509 200 : thread->msg_cache_count = 0;
510 :
511 200 : thread->tsc_last = spdk_get_ticks();
512 :
513 : /* A monotonically increasing ID is assigned to each created poller, beginning at
514 : * 1. Once the ID wraps around to 0, a warning is logged and poller IDs may repeat.
515 : */
516 200 : thread->next_poller_id = 1;
517 :
518 200 : thread->messages = spdk_ring_create(SPDK_RING_TYPE_MP_SC, 65536, SPDK_ENV_SOCKET_ID_ANY);
519 200 : if (!thread->messages) {
520 0 : SPDK_ERRLOG("Unable to allocate memory for message ring\n");
521 0 : free(thread);
522 0 : return NULL;
523 : }
524 :
525 : /* Fill the local message pool cache. */
526 200 : rc = spdk_mempool_get_bulk(g_spdk_msg_mempool, (void **)msgs, SPDK_MSG_MEMPOOL_CACHE_SIZE);
527 200 : if (rc == 0) {
528 : /* If we can't populate the cache it's ok. The cache will get filled
529 : * up organically as messages are passed to the thread. */
530 205000 : for (i = 0; i < SPDK_MSG_MEMPOOL_CACHE_SIZE; i++) {
531 204800 : SLIST_INSERT_HEAD(&thread->msg_cache, msgs[i], link);
532 204800 : thread->msg_cache_count++;
533 204800 : }
534 200 : }
535 :
536 200 : if (name) {
537 40 : snprintf(thread->name, sizeof(thread->name), "%s", name);
538 40 : } else {
539 160 : snprintf(thread->name, sizeof(thread->name), "%p", thread);
540 : }
541 :
542 200 : pthread_mutex_lock(&g_devlist_mutex);
543 200 : if (g_thread_id == 0) {
544 0 : SPDK_ERRLOG("Thread ID rolled over. Further thread creation is not allowed.\n");
545 0 : pthread_mutex_unlock(&g_devlist_mutex);
546 0 : _free_thread(thread);
547 0 : return NULL;
548 : }
549 200 : thread->id = g_thread_id++;
550 200 : TAILQ_INSERT_TAIL(&g_threads, thread, tailq);
551 200 : g_thread_count++;
552 200 : pthread_mutex_unlock(&g_devlist_mutex);
553 :
554 200 : SPDK_DEBUGLOG(thread, "Allocating new thread (%" PRIu64 ", %s)\n",
555 : thread->id, thread->name);
556 :
557 200 : if (spdk_interrupt_mode_is_enabled()) {
558 0 : thread->in_interrupt = true;
559 0 : rc = thread_interrupt_create(thread);
560 0 : if (rc != 0) {
561 0 : _free_thread(thread);
562 0 : return NULL;
563 : }
564 0 : }
565 :
566 200 : if (g_new_thread_fn) {
567 2 : rc = g_new_thread_fn(thread);
568 200 : } else if (g_thread_op_supported_fn && g_thread_op_supported_fn(SPDK_THREAD_OP_NEW)) {
569 15 : rc = g_thread_op_fn(thread, SPDK_THREAD_OP_NEW);
570 15 : }
571 :
572 200 : if (rc != 0) {
573 2 : _free_thread(thread);
574 2 : return NULL;
575 : }
576 :
577 198 : thread->state = SPDK_THREAD_STATE_RUNNING;
578 :
579 : /* If this is the first thread, save it as the app thread. Use an atomic
580 : * compare + exchange to guard against crazy users who might try to
581 : * call spdk_thread_create() simultaneously on multiple threads.
582 : */
583 198 : null_thread = NULL;
584 198 : __atomic_compare_exchange_n(&g_app_thread, &null_thread, thread, false,
585 : __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
586 :
587 198 : return thread;
588 200 : }
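/*
 * Editorial sketch (not part of thread.c): the minimal lifecycle a caller
 * drives around spdk_thread_create(), using only functions defined in this
 * file. It assumes the SPDK env layer is already initialized; error handling
 * is elided for brevity.
 */
static void
thread_lifecycle_sketch(void)
{
	struct spdk_thread *thread;

	spdk_thread_lib_init(NULL, 0);

	thread = spdk_thread_create("worker", NULL);
	spdk_set_thread(thread);

	/* ... send messages, register pollers, do work ... */

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		/* now == 0 makes spdk_thread_poll() sample spdk_get_ticks() itself. */
		spdk_thread_poll(thread, 0, 0);
	}

	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();	/* also frees the app thread */
}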
589 :
590 : struct spdk_thread *
591 1436 : spdk_thread_get_app_thread(void)
592 : {
593 1436 : return g_app_thread;
594 : }
595 :
596 : bool
597 147 : spdk_thread_is_app_thread(struct spdk_thread *thread)
598 : {
599 147 : if (thread == NULL) {
600 146 : thread = _get_thread();
601 146 : }
602 :
603 147 : return g_app_thread == thread;
604 : }
605 :
606 : void
607 1 : spdk_thread_bind(struct spdk_thread *thread, bool bind)
608 : {
609 1 : thread->is_bound = bind;
610 1 : }
611 :
612 : bool
613 10 : spdk_thread_is_bound(struct spdk_thread *thread)
614 : {
615 10 : return thread->is_bound;
616 : }
617 :
618 : void
619 230134 : spdk_set_thread(struct spdk_thread *thread)
620 : {
621 230134 : tls_thread = thread;
622 230134 : }
623 :
624 : static void
625 201 : thread_exit(struct spdk_thread *thread, uint64_t now)
626 : {
627 201 : struct spdk_poller *poller;
628 201 : struct spdk_io_channel *ch;
629 :
630 201 : if (now >= thread->exit_timeout_tsc) {
631 1 : SPDK_ERRLOG("thread %s got timeout, and move it to the exited state forcefully\n",
632 : thread->name);
633 1 : goto exited;
634 : }
635 :
636 200 : if (spdk_ring_count(thread->messages) > 0) {
637 3 : SPDK_INFOLOG(thread, "thread %s still has messages\n", thread->name);
638 3 : return;
639 : }
640 :
641 197 : if (thread->for_each_count > 0) {
642 4 : SPDK_INFOLOG(thread, "thread %s is still executing %u for_each_channels/threads\n",
643 : thread->name, thread->for_each_count);
644 4 : return;
645 : }
646 :
647 193 : TAILQ_FOREACH(poller, &thread->active_pollers, tailq) {
648 2 : if (poller->state != SPDK_POLLER_STATE_UNREGISTERED) {
649 2 : SPDK_INFOLOG(thread,
650 : "thread %s still has active poller %s\n",
651 : thread->name, poller->name);
652 2 : return;
653 : }
654 0 : }
655 :
656 233 : RB_FOREACH(poller, timed_pollers_tree, &thread->timed_pollers) {
657 42 : if (poller->state != SPDK_POLLER_STATE_UNREGISTERED) {
658 0 : SPDK_INFOLOG(thread,
659 : "thread %s still has active timed poller %s\n",
660 : thread->name, poller->name);
661 0 : return;
662 : }
663 42 : }
664 :
665 191 : TAILQ_FOREACH(poller, &thread->paused_pollers, tailq) {
666 0 : SPDK_INFOLOG(thread,
667 : "thread %s still has paused poller %s\n",
668 : thread->name, poller->name);
669 0 : return;
670 : }
671 :
672 191 : RB_FOREACH(ch, io_channel_tree, &thread->io_channels) {
673 2 : SPDK_INFOLOG(thread,
674 : "thread %s still has channel for io_device %s\n",
675 : thread->name, ch->dev->name);
676 2 : return;
677 : }
678 :
679 189 : if (thread->pending_unregister_count > 0) {
680 2 : SPDK_INFOLOG(thread,
681 : "thread %s is still unregistering io_devices\n",
682 : thread->name);
683 2 : return;
684 : }
685 :
686 : exited:
687 188 : thread->state = SPDK_THREAD_STATE_EXITED;
688 188 : if (spdk_unlikely(thread->in_interrupt)) {
689 0 : g_thread_op_fn(thread, SPDK_THREAD_OP_RESCHED);
690 0 : }
691 201 : }
692 :
693 : static void _thread_exit(void *ctx);
694 :
695 : int
696 197 : spdk_thread_exit(struct spdk_thread *thread)
697 : {
698 197 : SPDK_DEBUGLOG(thread, "Exit thread %s\n", thread->name);
699 :
700 197 : assert(tls_thread == thread);
701 :
702 197 : if (thread->state >= SPDK_THREAD_STATE_EXITING) {
703 9 : SPDK_INFOLOG(thread,
704 : "thread %s is already exiting\n",
705 : thread->name);
706 9 : return 0;
707 : }
708 :
709 188 : thread->exit_timeout_tsc = spdk_get_ticks() + (spdk_get_ticks_hz() *
710 : SPDK_THREAD_EXIT_TIMEOUT_SEC);
711 188 : thread->state = SPDK_THREAD_STATE_EXITING;
712 :
713 188 : if (spdk_interrupt_mode_is_enabled()) {
714 0 : spdk_thread_send_msg(thread, _thread_exit, thread);
715 0 : }
716 :
717 188 : return 0;
718 197 : }
719 :
720 : bool
721 0 : spdk_thread_is_running(struct spdk_thread *thread)
722 : {
723 0 : return thread->state == SPDK_THREAD_STATE_RUNNING;
724 : }
725 :
726 : bool
727 377 : spdk_thread_is_exited(struct spdk_thread *thread)
728 : {
729 377 : return thread->state == SPDK_THREAD_STATE_EXITED;
730 : }
731 :
732 : void
733 167 : spdk_thread_destroy(struct spdk_thread *thread)
734 : {
735 167 : assert(thread != NULL);
736 167 : SPDK_DEBUGLOG(thread, "Destroy thread %s\n", thread->name);
737 :
738 167 : assert(thread->state == SPDK_THREAD_STATE_EXITED);
739 :
740 167 : if (tls_thread == thread) {
741 161 : tls_thread = NULL;
742 161 : }
743 :
744 : /* To be safe, do not free the app thread until spdk_thread_lib_fini(). */
745 167 : if (thread != g_app_thread) {
746 83 : _free_thread(thread);
747 83 : }
748 167 : }
749 :
750 : void *
751 47 : spdk_thread_get_ctx(struct spdk_thread *thread)
752 : {
753 47 : if (g_ctx_sz > 0) {
754 47 : return thread->ctx;
755 : }
756 :
757 0 : return NULL;
758 47 : }
759 :
760 : struct spdk_cpuset *
761 44 : spdk_thread_get_cpumask(struct spdk_thread *thread)
762 : {
763 44 : return &thread->cpumask;
764 : }
765 :
766 : int
767 2 : spdk_thread_set_cpumask(struct spdk_cpuset *cpumask)
768 : {
769 2 : struct spdk_thread *thread;
770 :
771 2 : if (!g_thread_op_supported_fn || !g_thread_op_supported_fn(SPDK_THREAD_OP_RESCHED)) {
772 0 : SPDK_ERRLOG("Framework does not support reschedule operation.\n");
773 0 : assert(false);
774 : return -ENOTSUP;
775 : }
776 :
777 2 : thread = spdk_get_thread();
778 2 : if (!thread) {
779 0 : SPDK_ERRLOG("Called from non-SPDK thread\n");
780 0 : assert(false);
781 : return -EINVAL;
782 : }
783 :
784 2 : spdk_cpuset_copy(&thread->cpumask, cpumask);
785 :
786 : /* Invoke framework's reschedule operation. If this function is called multiple times
787 : * in a single spdk_thread_poll() context, the last cpumask will be used in the
788 : * reschedule operation.
789 : */
790 2 : g_thread_op_fn(thread, SPDK_THREAD_OP_RESCHED);
791 :
792 2 : return 0;
793 2 : }
794 :
795 : struct spdk_thread *
796 202 : spdk_thread_get_from_ctx(void *ctx)
797 : {
798 202 : if (ctx == NULL) {
799 0 : assert(false);
800 : return NULL;
801 : }
802 :
803 202 : assert(g_ctx_sz > 0);
804 :
805 202 : return SPDK_CONTAINEROF(ctx, struct spdk_thread, ctx);
806 : }
807 :
808 : static inline uint32_t
809 341370 : msg_queue_run_batch(struct spdk_thread *thread, uint32_t max_msgs)
810 : {
811 341370 : unsigned count, i;
812 341370 : void *messages[SPDK_MSG_BATCH_SIZE];
813 341370 : uint64_t notify = 1;
814 341370 : int rc;
815 :
816 : #ifdef DEBUG
817 : /*
818 : * spdk_ring_dequeue() fills messages and returns how many entries it wrote,
819 : * so we will never actually read uninitialized data from messages, but just to be sure
820 : * (and to silence a static analyzer false positive), initialize the array to NULL pointers.
821 : */
822 341370 : memset(messages, 0, sizeof(messages));
823 : #endif
824 :
825 341370 : if (max_msgs > 0) {
826 269 : max_msgs = spdk_min(max_msgs, SPDK_MSG_BATCH_SIZE);
827 269 : } else {
828 341101 : max_msgs = SPDK_MSG_BATCH_SIZE;
829 : }
830 :
831 341370 : count = spdk_ring_dequeue(thread->messages, messages, max_msgs);
832 341370 : if (spdk_unlikely(thread->in_interrupt) &&
833 0 : spdk_ring_count(thread->messages) != 0) {
834 0 : rc = write(thread->msg_fd, ¬ify, sizeof(notify));
835 0 : if (rc < 0) {
836 0 : SPDK_ERRLOG("failed to notify msg_queue: %s.\n", spdk_strerror(errno));
837 0 : }
838 0 : }
839 341370 : if (count == 0) {
840 240395 : return 0;
841 : }
842 :
843 206627 : for (i = 0; i < count; i++) {
844 105652 : struct spdk_msg *msg = messages[i];
845 :
846 105652 : assert(msg != NULL);
847 :
848 : SPDK_DTRACE_PROBE2(msg_exec, msg->fn, msg->arg);
849 :
850 105652 : msg->fn(msg->arg);
851 :
852 105652 : SPIN_ASSERT(thread->lock_count == 0, SPIN_ERR_HOLD_DURING_SWITCH);
853 :
854 105652 : if (thread->msg_cache_count < SPDK_MSG_MEMPOOL_CACHE_SIZE) {
855 : /* Insert the messages at the head. We want to re-use the hot
856 : * ones. */
857 105506 : SLIST_INSERT_HEAD(&thread->msg_cache, msg, link);
858 105506 : thread->msg_cache_count++;
859 105506 : } else {
860 146 : spdk_mempool_put(g_spdk_msg_mempool, msg);
861 : }
862 105652 : }
863 :
864 100975 : return count;
865 341370 : }
866 :
867 : static void
868 468 : poller_insert_timer(struct spdk_thread *thread, struct spdk_poller *poller, uint64_t now)
869 : {
870 468 : struct spdk_poller *tmp __attribute__((unused));
871 :
872 468 : poller->next_run_tick = now + poller->period_ticks;
873 :
874 : /*
875 : * Insert poller in the thread's timed_pollers tree by next scheduled run time
876 : * as its key.
877 : */
878 468 : tmp = RB_INSERT(timed_pollers_tree, &thread->timed_pollers, poller);
879 468 : assert(tmp == NULL);
880 :
881 : /* Update the cache only if it is empty or the inserted poller is earlier than it.
882 : * RB_MIN() is not necessary here because all pollers that have exactly the same
883 : * next_run_tick as an existing poller are inserted on the right side.
884 : */
885 819 : if (thread->first_timed_poller == NULL ||
886 351 : poller->next_run_tick < thread->first_timed_poller->next_run_tick) {
887 244 : thread->first_timed_poller = poller;
888 244 : }
889 468 : }
890 :
891 : static inline void
892 0 : poller_remove_timer(struct spdk_thread *thread, struct spdk_poller *poller)
893 : {
894 0 : struct spdk_poller *tmp __attribute__((unused));
895 :
896 0 : tmp = RB_REMOVE(timed_pollers_tree, &thread->timed_pollers, poller);
897 0 : assert(tmp != NULL);
898 :
899 : /* This function is not used on any performance-critical path.
900 : * Simply update the cache via RB_MIN() if it needs to change.
901 : */
902 0 : if (thread->first_timed_poller == poller) {
903 0 : thread->first_timed_poller = RB_MIN(timed_pollers_tree, &thread->timed_pollers);
904 0 : }
905 0 : }
906 :
907 : static void
908 778 : thread_insert_poller(struct spdk_thread *thread, struct spdk_poller *poller)
909 : {
910 778 : if (poller->period_ticks) {
911 334 : poller_insert_timer(thread, poller, spdk_get_ticks());
912 334 : } else {
913 444 : TAILQ_INSERT_TAIL(&thread->active_pollers, poller, tailq);
914 : }
915 778 : }
916 :
917 : static inline void
918 341370 : thread_update_stats(struct spdk_thread *thread, uint64_t end,
919 : uint64_t start, int rc)
920 : {
921 341370 : if (rc == 0) {
922 : /* Poller status idle */
923 239968 : thread->stats.idle_tsc += end - start;
924 341370 : } else if (rc > 0) {
925 : /* Poller status busy */
926 101402 : thread->stats.busy_tsc += end - start;
927 101402 : }
928 : /* Store end time to use it as start time of the next spdk_thread_poll(). */
929 341370 : thread->tsc_last = end;
930 341370 : }
931 :
932 : static inline int
933 1666 : thread_execute_poller(struct spdk_thread *thread, struct spdk_poller *poller)
934 : {
935 1666 : int rc;
936 :
937 1666 : switch (poller->state) {
938 : case SPDK_POLLER_STATE_UNREGISTERED:
939 101 : TAILQ_REMOVE(&thread->active_pollers, poller, tailq);
940 101 : free(poller);
941 101 : return 0;
942 : case SPDK_POLLER_STATE_PAUSING:
943 4 : TAILQ_REMOVE(&thread->active_pollers, poller, tailq);
944 4 : TAILQ_INSERT_TAIL(&thread->paused_pollers, poller, tailq);
945 4 : poller->state = SPDK_POLLER_STATE_PAUSED;
946 4 : return 0;
947 : case SPDK_POLLER_STATE_WAITING:
948 1561 : break;
949 : default:
950 0 : assert(false);
951 : break;
952 : }
953 :
954 1561 : poller->state = SPDK_POLLER_STATE_RUNNING;
955 1561 : rc = poller->fn(poller->arg);
956 :
957 1561 : SPIN_ASSERT(thread->lock_count == 0, SPIN_ERR_HOLD_DURING_SWITCH);
958 :
959 1561 : poller->run_count++;
960 1561 : if (rc > 0) {
961 433 : poller->busy_count++;
962 433 : }
963 :
964 : #ifdef DEBUG
965 1561 : if (rc == -1) {
966 19 : SPDK_DEBUGLOG(thread, "Poller %s returned -1\n", poller->name);
967 19 : }
968 : #endif
969 :
970 1561 : switch (poller->state) {
971 : case SPDK_POLLER_STATE_UNREGISTERED:
972 341 : TAILQ_REMOVE(&thread->active_pollers, poller, tailq);
973 341 : free(poller);
974 341 : break;
975 : case SPDK_POLLER_STATE_PAUSING:
976 6 : TAILQ_REMOVE(&thread->active_pollers, poller, tailq);
977 6 : TAILQ_INSERT_TAIL(&thread->paused_pollers, poller, tailq);
978 6 : poller->state = SPDK_POLLER_STATE_PAUSED;
979 6 : break;
980 : case SPDK_POLLER_STATE_PAUSED:
981 : case SPDK_POLLER_STATE_WAITING:
982 0 : break;
983 : case SPDK_POLLER_STATE_RUNNING:
984 1214 : poller->state = SPDK_POLLER_STATE_WAITING;
985 1214 : break;
986 : default:
987 0 : assert(false);
988 : break;
989 : }
990 :
991 1561 : return rc;
992 1666 : }
993 :
994 : static inline int
995 426 : thread_execute_timed_poller(struct spdk_thread *thread, struct spdk_poller *poller,
996 : uint64_t now)
997 : {
998 426 : int rc;
999 :
1000 426 : switch (poller->state) {
1001 : case SPDK_POLLER_STATE_UNREGISTERED:
1002 133 : free(poller);
1003 133 : return 0;
1004 : case SPDK_POLLER_STATE_PAUSING:
1005 13 : TAILQ_INSERT_TAIL(&thread->paused_pollers, poller, tailq);
1006 13 : poller->state = SPDK_POLLER_STATE_PAUSED;
1007 13 : return 0;
1008 : case SPDK_POLLER_STATE_WAITING:
1009 280 : break;
1010 : default:
1011 0 : assert(false);
1012 : break;
1013 : }
1014 :
1015 280 : poller->state = SPDK_POLLER_STATE_RUNNING;
1016 280 : rc = poller->fn(poller->arg);
1017 :
1018 280 : SPIN_ASSERT(thread->lock_count == 0, SPIN_ERR_HOLD_DURING_SWITCH);
1019 :
1020 280 : poller->run_count++;
1021 280 : if (rc > 0) {
1022 200 : poller->busy_count++;
1023 200 : }
1024 :
1025 : #ifdef DEBUG
1026 280 : if (rc == -1) {
1027 5 : SPDK_DEBUGLOG(thread, "Timed poller %s returned -1\n", poller->name);
1028 5 : }
1029 : #endif
1030 :
1031 280 : switch (poller->state) {
1032 : case SPDK_POLLER_STATE_UNREGISTERED:
1033 142 : free(poller);
1034 142 : break;
1035 : case SPDK_POLLER_STATE_PAUSING:
1036 4 : TAILQ_INSERT_TAIL(&thread->paused_pollers, poller, tailq);
1037 4 : poller->state = SPDK_POLLER_STATE_PAUSED;
1038 4 : break;
1039 : case SPDK_POLLER_STATE_PAUSED:
1040 0 : break;
1041 : case SPDK_POLLER_STATE_RUNNING:
1042 134 : poller->state = SPDK_POLLER_STATE_WAITING;
1043 : /* fallthrough */
1044 : case SPDK_POLLER_STATE_WAITING:
1045 134 : poller_insert_timer(thread, poller, now);
1046 134 : break;
1047 : default:
1048 0 : assert(false);
1049 : break;
1050 : }
1051 :
1052 280 : return rc;
1053 426 : }
1054 :
1055 : static int
1056 341362 : thread_poll(struct spdk_thread *thread, uint32_t max_msgs, uint64_t now)
1057 : {
1058 341362 : uint32_t msg_count;
1059 341362 : struct spdk_poller *poller, *tmp;
1060 341362 : spdk_msg_fn critical_msg;
1061 341362 : int rc = 0;
1062 :
1063 341362 : thread->tsc_last = now;
1064 :
1065 341362 : critical_msg = thread->critical_msg;
1066 341362 : if (spdk_unlikely(critical_msg != NULL)) {
1067 0 : critical_msg(NULL);
1068 0 : thread->critical_msg = NULL;
1069 0 : rc = 1;
1070 0 : }
1071 :
1072 341362 : msg_count = msg_queue_run_batch(thread, max_msgs);
1073 341362 : if (msg_count) {
1074 100978 : rc = 1;
1075 100978 : }
1076 :
1077 343028 : TAILQ_FOREACH_REVERSE_SAFE(poller, &thread->active_pollers,
1078 : active_pollers_head, tailq, tmp) {
1079 1666 : int poller_rc;
1080 :
1081 1666 : poller_rc = thread_execute_poller(thread, poller);
1082 1666 : if (poller_rc > rc) {
1083 242 : rc = poller_rc;
1084 242 : }
1085 1666 : }
1086 :
1087 341362 : poller = thread->first_timed_poller;
1088 341788 : while (poller != NULL) {
1089 2997 : int timer_rc = 0;
1090 :
1091 2997 : if (now < poller->next_run_tick) {
1092 2571 : break;
1093 : }
1094 :
1095 426 : tmp = RB_NEXT(timed_pollers_tree, &thread->timed_pollers, poller);
1096 426 : RB_REMOVE(timed_pollers_tree, &thread->timed_pollers, poller);
1097 :
1098 : /* Update the cache to the next timed poller in the list
1099 : * only if the current poller is still the closest, otherwise,
1100 : * do nothing because the cache has already been updated.
1101 : */
1102 426 : if (thread->first_timed_poller == poller) {
1103 426 : thread->first_timed_poller = tmp;
1104 426 : }
1105 :
1106 426 : timer_rc = thread_execute_timed_poller(thread, poller, now);
1107 426 : if (timer_rc > rc) {
1108 182 : rc = timer_rc;
1109 182 : }
1110 :
1111 426 : poller = tmp;
1112 2997 : }
1113 :
1114 682724 : return rc;
1115 341362 : }
1116 :
1117 : static void
1118 0 : _thread_remove_pollers(void *ctx)
1119 : {
1120 0 : struct spdk_thread *thread = ctx;
1121 0 : struct spdk_poller *poller, *tmp;
1122 :
1123 0 : TAILQ_FOREACH_REVERSE_SAFE(poller, &thread->active_pollers,
1124 : active_pollers_head, tailq, tmp) {
1125 0 : if (poller->state == SPDK_POLLER_STATE_UNREGISTERED) {
1126 0 : TAILQ_REMOVE(&thread->active_pollers, poller, tailq);
1127 0 : free(poller);
1128 0 : }
1129 0 : }
1130 :
1131 0 : RB_FOREACH_SAFE(poller, timed_pollers_tree, &thread->timed_pollers, tmp) {
1132 0 : if (poller->state == SPDK_POLLER_STATE_UNREGISTERED) {
1133 0 : poller_remove_timer(thread, poller);
1134 0 : free(poller);
1135 0 : }
1136 0 : }
1137 :
1138 0 : thread->poller_unregistered = false;
1139 0 : }
1140 :
1141 : static void
1142 0 : _thread_exit(void *ctx)
1143 : {
1144 0 : struct spdk_thread *thread = ctx;
1145 :
1146 0 : assert(thread->state == SPDK_THREAD_STATE_EXITING);
1147 :
1148 0 : thread_exit(thread, spdk_get_ticks());
1149 0 : }
1150 :
1151 : int
1152 341369 : spdk_thread_poll(struct spdk_thread *thread, uint32_t max_msgs, uint64_t now)
1153 : {
1154 341369 : struct spdk_thread *orig_thread;
1155 341369 : int rc;
1156 :
1157 341369 : orig_thread = _get_thread();
1158 341369 : tls_thread = thread;
1159 :
1160 341369 : if (now == 0) {
1161 337054 : now = spdk_get_ticks();
1162 337054 : }
1163 :
1164 341369 : if (spdk_likely(!thread->in_interrupt)) {
1165 341369 : rc = thread_poll(thread, max_msgs, now);
1166 341369 : if (spdk_unlikely(thread->in_interrupt)) {
1167 : /* The thread transitioned to interrupt mode during the above poll.
1168 : * Poll it one more time in case a msg was received without
1169 : * notification during the transition.
1170 : */
1171 0 : rc = thread_poll(thread, max_msgs, now);
1172 0 : }
1173 :
1174 341369 : if (spdk_unlikely(thread->state == SPDK_THREAD_STATE_EXITING)) {
1175 201 : thread_exit(thread, now);
1176 201 : }
1177 341369 : } else {
1178 : /* Non-block wait on thread's fd_group */
1179 0 : rc = spdk_fd_group_wait(thread->fgrp, 0);
1180 : }
1181 :
1182 341369 : thread_update_stats(thread, spdk_get_ticks(), now, rc);
1183 :
1184 341369 : tls_thread = orig_thread;
1185 :
1186 682738 : return rc;
1187 341369 : }
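/*
 * Editorial sketch: how a framework event loop might drive several
 * spdk_threads with spdk_thread_poll(). The return-code convention matches
 * thread_update_stats() above: > 0 means busy, 0 means idle. The threads[]
 * array and the running flag are hypothetical.
 */
static void
reactor_loop_sketch(struct spdk_thread **threads, int count, volatile bool *running)
{
	uint64_t busy = 0, idle = 0;
	int i, rc;

	while (*running) {
		for (i = 0; i < count; i++) {
			rc = spdk_thread_poll(threads[i], 0, 0);
			if (rc > 0) {
				busy++;	/* a message or poller did work */
			} else {
				idle++;
			}
		}
	}
}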
1188 :
1189 : uint64_t
1190 0 : spdk_thread_next_poller_expiration(struct spdk_thread *thread)
1191 : {
1192 0 : struct spdk_poller *poller;
1193 :
1194 0 : poller = thread->first_timed_poller;
1195 0 : if (poller) {
1196 0 : return poller->next_run_tick;
1197 : }
1198 :
1199 0 : return 0;
1200 0 : }
1201 :
1202 : int
1203 0 : spdk_thread_has_active_pollers(struct spdk_thread *thread)
1204 : {
1205 0 : return !TAILQ_EMPTY(&thread->active_pollers);
1206 : }
1207 :
1208 : static bool
1209 24 : thread_has_unpaused_pollers(struct spdk_thread *thread)
1210 : {
1211 48 : if (TAILQ_EMPTY(&thread->active_pollers) &&
1212 24 : RB_EMPTY(&thread->timed_pollers)) {
1213 24 : return false;
1214 : }
1215 :
1216 0 : return true;
1217 24 : }
1218 :
1219 : bool
1220 2 : spdk_thread_has_pollers(struct spdk_thread *thread)
1221 : {
1222 4 : if (!thread_has_unpaused_pollers(thread) &&
1223 2 : TAILQ_EMPTY(&thread->paused_pollers)) {
1224 2 : return false;
1225 : }
1226 :
1227 0 : return true;
1228 2 : }
1229 :
1230 : bool
1231 22 : spdk_thread_is_idle(struct spdk_thread *thread)
1232 : {
1233 44 : if (spdk_ring_count(thread->messages) ||
1234 22 : thread_has_unpaused_pollers(thread) ||
1235 22 : thread->critical_msg != NULL) {
1236 0 : return false;
1237 : }
1238 :
1239 22 : return true;
1240 22 : }
1241 :
1242 : uint32_t
1243 13 : spdk_thread_get_count(void)
1244 : {
1245 : /*
1246 : * Return cached value of the current thread count. We could acquire the
1247 : * lock and iterate through the TAILQ of threads to count them, but that
1248 : * count could still be invalidated after we release the lock.
1249 : */
1250 13 : return g_thread_count;
1251 : }
1252 :
1253 : struct spdk_thread *
1254 243701 : spdk_get_thread(void)
1255 : {
1256 243701 : return _get_thread();
1257 : }
1258 :
1259 : const char *
1260 4 : spdk_thread_get_name(const struct spdk_thread *thread)
1261 : {
1262 4 : return thread->name;
1263 : }
1264 :
1265 : uint64_t
1266 16 : spdk_thread_get_id(const struct spdk_thread *thread)
1267 : {
1268 16 : return thread->id;
1269 : }
1270 :
1271 : struct spdk_thread *
1272 14 : spdk_thread_get_by_id(uint64_t id)
1273 : {
1274 14 : struct spdk_thread *thread;
1275 :
1276 14 : if (id == 0 || id >= g_thread_id) {
1277 0 : SPDK_ERRLOG("invalid thread id: %" PRIu64 ".\n", id);
1278 0 : return NULL;
1279 : }
1280 14 : pthread_mutex_lock(&g_devlist_mutex);
1281 30 : TAILQ_FOREACH(thread, &g_threads, tailq) {
1282 30 : if (thread->id == id) {
1283 14 : break;
1284 : }
1285 16 : }
1286 14 : pthread_mutex_unlock(&g_devlist_mutex);
1287 14 : return thread;
1288 14 : }
1289 :
1290 : int
1291 56 : spdk_thread_get_stats(struct spdk_thread_stats *stats)
1292 : {
1293 56 : struct spdk_thread *thread;
1294 :
1295 56 : thread = _get_thread();
1296 56 : if (!thread) {
1297 0 : SPDK_ERRLOG("No thread allocated\n");
1298 0 : return -EINVAL;
1299 : }
1300 :
1301 56 : if (stats == NULL) {
1302 0 : return -EINVAL;
1303 : }
1304 :
1305 56 : *stats = thread->stats;
1306 :
1307 56 : return 0;
1308 56 : }
1309 :
1310 : uint64_t
1311 100997 : spdk_thread_get_last_tsc(struct spdk_thread *thread)
1312 : {
1313 100997 : if (thread == NULL) {
1314 0 : thread = _get_thread();
1315 0 : }
1316 :
1317 100997 : return thread->tsc_last;
1318 : }
1319 :
1320 : static inline int
1321 105661 : thread_send_msg_notification(const struct spdk_thread *target_thread)
1322 : {
1323 105661 : uint64_t notify = 1;
1324 105661 : int rc;
1325 :
1326 : /* Not necessary to do notification if interrupt facility is not enabled */
1327 105661 : if (spdk_likely(!spdk_interrupt_mode_is_enabled())) {
1328 105661 : return 0;
1329 : }
1330 :
1331 : /* Since each spdk_thread can switch between poll and interrupt mode dynamically,
1332 : * after sending a thread msg it is necessary to check whether the target thread
1333 : * runs in interrupt mode and then decide whether to do the event notification.
1334 : */
1335 0 : if (spdk_unlikely(target_thread->in_interrupt)) {
1336 0 : rc = write(target_thread->msg_fd, ¬ify, sizeof(notify));
1337 0 : if (rc < 0) {
1338 0 : SPDK_ERRLOG("failed to notify msg_queue: %s.\n", spdk_strerror(errno));
1339 0 : return -EIO;
1340 : }
1341 0 : }
1342 :
1343 0 : return 0;
1344 105661 : }
1345 :
1346 : int
1347 105661 : spdk_thread_send_msg(const struct spdk_thread *thread, spdk_msg_fn fn, void *ctx)
1348 : {
1349 105661 : struct spdk_thread *local_thread;
1350 105661 : struct spdk_msg *msg;
1351 105661 : int rc;
1352 :
1353 105661 : assert(thread != NULL);
1354 :
1355 105661 : if (spdk_unlikely(thread->state == SPDK_THREAD_STATE_EXITED)) {
1356 0 : SPDK_ERRLOG("Thread %s is marked as exited.\n", thread->name);
1357 0 : return -EIO;
1358 : }
1359 :
1360 105661 : local_thread = _get_thread();
1361 :
1362 105661 : msg = NULL;
1363 105661 : if (local_thread != NULL) {
1364 105659 : if (local_thread->msg_cache_count > 0) {
1365 105659 : msg = SLIST_FIRST(&local_thread->msg_cache);
1366 105659 : assert(msg != NULL);
1367 105659 : SLIST_REMOVE_HEAD(&local_thread->msg_cache, link);
1368 105659 : local_thread->msg_cache_count--;
1369 105659 : }
1370 105659 : }
1371 :
1372 105661 : if (msg == NULL) {
1373 2 : msg = spdk_mempool_get(g_spdk_msg_mempool);
1374 2 : if (!msg) {
1375 0 : SPDK_ERRLOG("msg could not be allocated\n");
1376 0 : return -ENOMEM;
1377 : }
1378 2 : }
1379 :
1380 105661 : msg->fn = fn;
1381 105661 : msg->arg = ctx;
1382 :
1383 105661 : rc = spdk_ring_enqueue(thread->messages, (void **)&msg, 1, NULL);
1384 105661 : if (rc != 1) {
1385 0 : SPDK_ERRLOG("msg could not be enqueued\n");
1386 0 : spdk_mempool_put(g_spdk_msg_mempool, msg);
1387 0 : return -EIO;
1388 : }
1389 :
1390 105661 : return thread_send_msg_notification(thread);
1391 105661 : }
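/*
 * Editorial sketch: handing work to another spdk_thread with
 * spdk_thread_send_msg(). say_hello() is hypothetical; it runs later on the
 * target thread, inside msg_queue_run_batch().
 */
static void
say_hello(void *ctx)
{
	SPDK_NOTICELOG("hello from thread %s\n",
		       spdk_thread_get_name(spdk_get_thread()));
}

static int
send_hello_sketch(struct spdk_thread *target)
{
	/* Returns -ENOMEM if no msg object is available, -EIO if the target
	 * exited or its message ring is full, 0 on success. */
	return spdk_thread_send_msg(target, say_hello, NULL);
}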
1392 :
1393 : int
1394 0 : spdk_thread_send_critical_msg(struct spdk_thread *thread, spdk_msg_fn fn)
1395 : {
1396 0 : spdk_msg_fn expected = NULL;
1397 :
1398 0 : if (!__atomic_compare_exchange_n(&thread->critical_msg, &expected, fn, false, __ATOMIC_SEQ_CST,
1399 : __ATOMIC_SEQ_CST)) {
1400 0 : return -EIO;
1401 : }
1402 :
1403 0 : return thread_send_msg_notification(thread);
1404 0 : }
1405 :
1406 : #ifdef __linux__
1407 : static int
1408 0 : interrupt_timerfd_process(void *arg)
1409 : {
1410 0 : struct spdk_poller *poller = arg;
1411 0 : uint64_t exp;
1412 0 : int rc;
1413 :
1414 : /* Clear the level trigger of the interval timer. */
1415 0 : rc = read(poller->intr->efd, &exp, sizeof(exp));
1416 0 : if (rc < 0) {
1417 0 : if (rc == -EAGAIN) {
1418 0 : return 0;
1419 : }
1420 :
1421 0 : return rc;
1422 : }
1423 :
1424 : SPDK_DTRACE_PROBE2(timerfd_exec, poller->fn, poller->arg);
1425 :
1426 0 : return poller->fn(poller->arg);
1427 0 : }
1428 :
1429 : static int
1430 0 : period_poller_interrupt_init(struct spdk_poller *poller)
1431 : {
1432 0 : int timerfd;
1433 :
1434 0 : SPDK_DEBUGLOG(thread, "timerfd init for periodic poller %s\n", poller->name);
1435 0 : timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC);
1436 0 : if (timerfd < 0) {
1437 0 : return -errno;
1438 : }
1439 :
1440 0 : poller->intr = spdk_interrupt_register(timerfd, interrupt_timerfd_process, poller, poller->name);
1441 0 : if (poller->intr == NULL) {
1442 0 : close(timerfd);
1443 0 : return -1;
1444 : }
1445 :
1446 0 : return 0;
1447 0 : }
1448 :
1449 : static void
1450 0 : period_poller_set_interrupt_mode(struct spdk_poller *poller, void *cb_arg, bool interrupt_mode)
1451 : {
1452 0 : int timerfd;
1453 0 : uint64_t now_tick = spdk_get_ticks();
1454 0 : uint64_t ticks = spdk_get_ticks_hz();
1455 0 : int ret;
1456 0 : struct itimerspec new_tv = {};
1457 0 : struct itimerspec old_tv = {};
1458 :
1459 0 : assert(poller->intr != NULL);
1460 0 : assert(poller->period_ticks != 0);
1461 :
1462 0 : timerfd = poller->intr->efd;
1463 :
1464 0 : assert(timerfd >= 0);
1465 :
1466 0 : SPDK_DEBUGLOG(thread, "timerfd set poller %s into %s mode\n", poller->name,
1467 : interrupt_mode ? "interrupt" : "poll");
1468 :
1469 0 : if (interrupt_mode) {
1470 : /* Set repeated timer expiration */
1471 0 : new_tv.it_interval.tv_sec = poller->period_ticks / ticks;
1472 0 : new_tv.it_interval.tv_nsec = poller->period_ticks % ticks * SPDK_SEC_TO_NSEC / ticks;
1473 :
1474 : /* Update next timer expiration */
1475 0 : if (poller->next_run_tick == 0) {
1476 0 : poller->next_run_tick = now_tick + poller->period_ticks;
1477 0 : } else if (poller->next_run_tick < now_tick) {
1478 0 : poller->next_run_tick = now_tick;
1479 0 : }
1480 :
1481 0 : new_tv.it_value.tv_sec = (poller->next_run_tick - now_tick) / ticks;
1482 0 : new_tv.it_value.tv_nsec = (poller->next_run_tick - now_tick) % ticks * SPDK_SEC_TO_NSEC / ticks;
1483 :
1484 0 : ret = timerfd_settime(timerfd, 0, &new_tv, NULL);
1485 0 : if (ret < 0) {
1486 0 : SPDK_ERRLOG("Failed to arm timerfd: error(%d)\n", errno);
1487 0 : assert(false);
1488 : }
1489 0 : } else {
1490 : /* Disarm the timer */
1491 0 : ret = timerfd_settime(timerfd, 0, &new_tv, &old_tv);
1492 0 : if (ret < 0) {
1493 : /* timerfd_settime's failure indicates that the timerfd is in error */
1494 0 : SPDK_ERRLOG("Failed to disarm timerfd: error(%d)\n", errno);
1495 0 : assert(false);
1496 : }
1497 :
1498 : /* In order to reuse poller_insert_timer, adjust now_tick so that next_run_tick becomes
1499 : * now_tick + ticks * old_tv.it_value.tv_sec + (ticks * old_tv.it_value.tv_nsec) / SPDK_SEC_TO_NSEC
1500 : */
1501 0 : now_tick = now_tick - poller->period_ticks + ticks * old_tv.it_value.tv_sec + \
1502 0 : (ticks * old_tv.it_value.tv_nsec) / SPDK_SEC_TO_NSEC;
1503 0 : poller_remove_timer(poller->thread, poller);
1504 0 : poller_insert_timer(poller->thread, poller, now_tick);
1505 : }
1506 0 : }
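/*
 * Worked example (editorial) for the interval math above: with a 2 GHz TSC
 * (ticks == 2000000000) and period_ticks == 3000000000 (a 1.5 s period),
 * it_interval.tv_sec  = 3000000000 / 2000000000 = 1 and
 * it_interval.tv_nsec = (3000000000 % 2000000000) * SPDK_SEC_TO_NSEC / 2000000000
 *                     = 500000000, i.e. 1.5 s as expected.
 */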
1507 :
1508 : static void
1509 0 : poller_interrupt_fini(struct spdk_poller *poller)
1510 : {
1511 0 : int fd;
1512 :
1513 0 : SPDK_DEBUGLOG(thread, "interrupt fini for poller %s\n", poller->name);
1514 0 : assert(poller->intr != NULL);
1515 0 : fd = poller->intr->efd;
1516 0 : spdk_interrupt_unregister(&poller->intr);
1517 0 : close(fd);
1518 0 : }
1519 :
1520 : static int
1521 0 : busy_poller_interrupt_init(struct spdk_poller *poller)
1522 : {
1523 0 : int busy_efd;
1524 :
1525 0 : SPDK_DEBUGLOG(thread, "busy_efd init for busy poller %s\n", poller->name);
1526 0 : busy_efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
1527 0 : if (busy_efd < 0) {
1528 0 : SPDK_ERRLOG("Failed to create eventfd for Poller(%s).\n", poller->name);
1529 0 : return -errno;
1530 : }
1531 :
1532 0 : poller->intr = spdk_interrupt_register(busy_efd, poller->fn, poller->arg, poller->name);
1533 0 : if (poller->intr == NULL) {
1534 0 : close(busy_efd);
1535 0 : return -1;
1536 : }
1537 :
1538 0 : return 0;
1539 0 : }
1540 :
1541 : static void
1542 0 : busy_poller_set_interrupt_mode(struct spdk_poller *poller, void *cb_arg, bool interrupt_mode)
1543 : {
1544 0 : int busy_efd = poller->intr->efd;
1545 0 : uint64_t notify = 1;
1546 0 : int rc __attribute__((unused));
1547 :
1548 0 : assert(busy_efd >= 0);
1549 :
1550 0 : if (interrupt_mode) {
1551 : /* Writing to the eventfd without reading it keeps it level-triggered, so it fires repeatedly. */
1552 0 : if (write(busy_efd, ¬ify, sizeof(notify)) < 0) {
1553 0 : SPDK_ERRLOG("Failed to set busy wait for Poller(%s).\n", poller->name);
1554 0 : }
1555 0 : } else {
1556 : /* Read on eventfd will clear its level triggering. */
1557 0 : rc = read(busy_efd, ¬ify, sizeof(notify));
1558 : }
1559 0 : }
1560 :
1561 : #else
1562 :
1563 : static int
1564 : period_poller_interrupt_init(struct spdk_poller *poller)
1565 : {
1566 : return -ENOTSUP;
1567 : }
1568 :
1569 : static void
1570 : period_poller_set_interrupt_mode(struct spdk_poller *poller, void *cb_arg, bool interrupt_mode)
1571 : {
1572 : }
1573 :
1574 : static void
1575 : poller_interrupt_fini(struct spdk_poller *poller)
1576 : {
1577 : }
1578 :
1579 : static int
1580 : busy_poller_interrupt_init(struct spdk_poller *poller)
1581 : {
1582 : return -ENOTSUP;
1583 : }
1584 :
1585 : static void
1586 : busy_poller_set_interrupt_mode(struct spdk_poller *poller, void *cb_arg, bool interrupt_mode)
1587 : {
1588 : }
1589 :
1590 : #endif
1591 :
1592 : void
1593 0 : spdk_poller_register_interrupt(struct spdk_poller *poller,
1594 : spdk_poller_set_interrupt_mode_cb cb_fn,
1595 : void *cb_arg)
1596 : {
1597 0 : assert(poller != NULL);
1598 0 : assert(cb_fn != NULL);
1599 0 : assert(spdk_get_thread() == poller->thread);
1600 :
1601 0 : if (!spdk_interrupt_mode_is_enabled()) {
1602 0 : return;
1603 : }
1604 :
1605 : /* If this poller already had an interrupt, clean the old one up. */
1606 0 : if (poller->intr != NULL) {
1607 0 : poller_interrupt_fini(poller);
1608 0 : }
1609 :
1610 0 : poller->set_intr_cb_fn = cb_fn;
1611 0 : poller->set_intr_cb_arg = cb_arg;
1612 :
1613 : /* Set poller into interrupt mode if thread is in interrupt. */
1614 0 : if (poller->thread->in_interrupt) {
1615 0 : poller->set_intr_cb_fn(poller, poller->set_intr_cb_arg, true);
1616 0 : }
1617 0 : }
1618 :
1619 : static uint64_t
1620 759 : convert_us_to_ticks(uint64_t us)
1621 : {
1622 759 : uint64_t quotient, remainder, ticks;
1623 :
1624 759 : if (us) {
1625 320 : quotient = us / SPDK_SEC_TO_USEC;
1626 320 : remainder = us % SPDK_SEC_TO_USEC;
1627 320 : ticks = spdk_get_ticks_hz();
1628 :
1629 320 : return ticks * quotient + (ticks * remainder) / SPDK_SEC_TO_USEC;
1630 : } else {
1631 439 : return 0;
1632 : }
1633 759 : }
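/*
 * Worked example (editorial): with spdk_get_ticks_hz() == 2300000000 (a
 * 2.3 GHz TSC), convert_us_to_ticks(1500) computes quotient = 0 and
 * remainder = 1500, and returns
 * 2300000000 * 0 + (2300000000 * 1500) / SPDK_SEC_TO_USEC = 3450000 ticks.
 * Splitting into quotient and remainder keeps the intermediate product from
 * overflowing 64 bits for large 'us' values.
 */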
1634 :
1635 : static struct spdk_poller *
1636 759 : poller_register(spdk_poller_fn fn,
1637 : void *arg,
1638 : uint64_t period_microseconds,
1639 : const char *name)
1640 : {
1641 759 : struct spdk_thread *thread;
1642 759 : struct spdk_poller *poller;
1643 :
1644 759 : thread = spdk_get_thread();
1645 759 : if (!thread) {
1646 0 : assert(false);
1647 : return NULL;
1648 : }
1649 :
1650 759 : if (spdk_unlikely(thread->state == SPDK_THREAD_STATE_EXITED)) {
1651 0 : SPDK_ERRLOG("thread %s is marked as exited\n", thread->name);
1652 0 : return NULL;
1653 : }
1654 :
1655 759 : poller = calloc(1, sizeof(*poller));
1656 759 : if (poller == NULL) {
1657 0 : SPDK_ERRLOG("Poller memory allocation failed\n");
1658 0 : return NULL;
1659 : }
1660 :
1661 759 : if (name) {
1662 708 : snprintf(poller->name, sizeof(poller->name), "%s", name);
1663 708 : } else {
1664 51 : snprintf(poller->name, sizeof(poller->name), "%p", fn);
1665 : }
1666 :
1667 759 : poller->state = SPDK_POLLER_STATE_WAITING;
1668 759 : poller->fn = fn;
1669 759 : poller->arg = arg;
1670 759 : poller->thread = thread;
1671 759 : poller->intr = NULL;
1672 759 : if (thread->next_poller_id == 0) {
1673 0 : SPDK_WARNLOG("Poller ID rolled over. Poller ID is duplicated.\n");
1674 0 : thread->next_poller_id = 1;
1675 0 : }
1676 759 : poller->id = thread->next_poller_id++;
1677 :
1678 759 : poller->period_ticks = convert_us_to_ticks(period_microseconds);
1679 :
1680 759 : if (spdk_interrupt_mode_is_enabled()) {
1681 0 : int rc;
1682 :
1683 0 : if (period_microseconds) {
1684 0 : rc = period_poller_interrupt_init(poller);
1685 0 : if (rc < 0) {
1686 0 : SPDK_ERRLOG("Failed to register interruptfd for periodic poller: %s\n", spdk_strerror(-rc));
1687 0 : free(poller);
1688 0 : return NULL;
1689 : }
1690 :
1691 0 : poller->set_intr_cb_fn = period_poller_set_interrupt_mode;
1692 0 : poller->set_intr_cb_arg = NULL;
1693 :
1694 0 : } else {
1695 : /* If the poller doesn't have a period, create an interrupt fd that is
1696 : * automatically always busy when running in interrupt mode.
1697 : */
1698 0 : rc = busy_poller_interrupt_init(poller);
1699 0 : if (rc > 0) {
1700 0 : SPDK_ERRLOG("Failed to register interruptfd for busy poller: %s\n", spdk_strerror(-rc));
1701 0 : free(poller);
1702 0 : return NULL;
1703 : }
1704 :
1705 0 : poller->set_intr_cb_fn = busy_poller_set_interrupt_mode;
1706 0 : poller->set_intr_cb_arg = NULL;
1707 : }
1708 :
1709 : /* Set poller into interrupt mode if thread is in interrupt. */
1710 0 : if (poller->thread->in_interrupt) {
1711 0 : poller->set_intr_cb_fn(poller, poller->set_intr_cb_arg, true);
1712 0 : }
1713 0 : }
1714 :
1715 759 : thread_insert_poller(thread, poller);
1716 :
1717 759 : return poller;
1718 759 : }
1719 :
1720 : struct spdk_poller *
1721 51 : spdk_poller_register(spdk_poller_fn fn,
1722 : void *arg,
1723 : uint64_t period_microseconds)
1724 : {
1725 51 : return poller_register(fn, arg, period_microseconds, NULL);
1726 : }
1727 :
1728 : struct spdk_poller *
1729 708 : spdk_poller_register_named(spdk_poller_fn fn,
1730 : void *arg,
1731 : uint64_t period_microseconds,
1732 : const char *name)
1733 : {
1734 708 : return poller_register(fn, arg, period_microseconds, name);
1735 : }
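/*
 * Editorial sketch: registering a 10 ms timed poller and unregistering it
 * later. check_queue() and the pending counter are hypothetical;
 * SPDK_POLLER_BUSY/SPDK_POLLER_IDLE are the conventional return values
 * declared in spdk/thread.h.
 */
static int
check_queue(void *arg)
{
	int *pending = arg;

	if (*pending == 0) {
		return SPDK_POLLER_IDLE;
	}
	(*pending)--;
	return SPDK_POLLER_BUSY;
}

static struct spdk_poller *
start_check_queue_sketch(int *pending)
{
	/* The period is given in microseconds: 10 * 1000 == 10 ms. */
	return spdk_poller_register_named(check_queue, pending, 10 * 1000, "check_queue");
}

/* Later, on the same thread: spdk_poller_unregister(&poller); */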
1736 :
1737 : static void
1738 0 : wrong_thread(const char *func, const char *name, struct spdk_thread *thread,
1739 : struct spdk_thread *curthread)
1740 : {
1741 0 : if (thread == NULL) {
1742 0 : SPDK_ERRLOG("%s(%s) called with NULL thread\n", func, name);
1743 0 : abort();
1744 : }
1745 0 : SPDK_ERRLOG("%s(%s) called from wrong thread %s:%" PRIu64 " (should be "
1746 : "%s:%" PRIu64 ")\n", func, name, curthread->name, curthread->id,
1747 : thread->name, thread->id);
1748 0 : assert(false);
1749 : }
1750 :
1751 : void
1752 1154 : spdk_poller_unregister(struct spdk_poller **ppoller)
1753 : {
1754 1154 : struct spdk_thread *thread;
1755 1154 : struct spdk_poller *poller;
1756 :
1757 1154 : poller = *ppoller;
1758 1154 : if (poller == NULL) {
1759 395 : return;
1760 : }
1761 :
1762 759 : *ppoller = NULL;
1763 :
1764 759 : thread = spdk_get_thread();
1765 759 : if (!thread) {
1766 0 : assert(false);
1767 : return;
1768 : }
1769 :
1770 759 : if (poller->thread != thread) {
1771 0 : wrong_thread(__func__, poller->name, poller->thread, thread);
1772 0 : return;
1773 : }
1774 :
1775 759 : if (spdk_interrupt_mode_is_enabled()) {
1776 : /* Release the interrupt resource for period or busy poller */
1777 0 : if (poller->intr != NULL) {
1778 0 : poller_interrupt_fini(poller);
1779 0 : }
1780 :
1781 : /* If there is not already a pending poller removal, generate
1782 : * a message to go process removals. */
1783 0 : if (!thread->poller_unregistered) {
1784 0 : thread->poller_unregistered = true;
1785 0 : spdk_thread_send_msg(thread, _thread_remove_pollers, thread);
1786 0 : }
1787 0 : }
1788 :
1789 : /* If the poller was paused, put it on the active_pollers list so that
1790 : * its unregistration can be processed by spdk_thread_poll().
1791 : */
1792 759 : if (poller->state == SPDK_POLLER_STATE_PAUSED) {
1793 8 : TAILQ_REMOVE(&thread->paused_pollers, poller, tailq);
1794 8 : TAILQ_INSERT_TAIL(&thread->active_pollers, poller, tailq);
1795 8 : poller->period_ticks = 0;
1796 8 : }
1797 :
1798 : /* Simply set the state to unregistered. The poller will get cleaned up
1799 : * in a subsequent call to spdk_thread_poll().
1800 : */
1801 759 : poller->state = SPDK_POLLER_STATE_UNREGISTERED;
1802 1154 : }
1803 :
1804 : void
1805 38 : spdk_poller_pause(struct spdk_poller *poller)
1806 : {
1807 38 : struct spdk_thread *thread;
1808 :
1809 38 : thread = spdk_get_thread();
1810 38 : if (!thread) {
1811 0 : assert(false);
1812 : return;
1813 : }
1814 :
1815 38 : if (poller->thread != thread) {
1816 0 : wrong_thread(__func__, poller->name, poller->thread, thread);
1817 0 : return;
1818 : }
1819 :
1820 : /* We only set the state to SPDK_POLLER_STATE_PAUSING here and let
1821 : * spdk_thread_poll() move it. This allows a poller to be paused from
1822 : * another poller's context without breaking the TAILQ_FOREACH_REVERSE_SAFE
1823 : * iteration, or from within itself without breaking the logic that always
1824 : * removes the closest timed poller in the TAILQ_FOREACH_SAFE iteration.
1825 : */
1826 38 : switch (poller->state) {
1827 : case SPDK_POLLER_STATE_PAUSED:
1828 : case SPDK_POLLER_STATE_PAUSING:
1829 2 : break;
1830 : case SPDK_POLLER_STATE_RUNNING:
1831 : case SPDK_POLLER_STATE_WAITING:
1832 36 : poller->state = SPDK_POLLER_STATE_PAUSING;
1833 36 : break;
1834 : default:
1835 0 : assert(false);
1836 : break;
1837 : }
1838 38 : }
1839 :
1840 : void
1841 28 : spdk_poller_resume(struct spdk_poller *poller)
1842 : {
1843 28 : struct spdk_thread *thread;
1844 :
1845 28 : thread = spdk_get_thread();
1846 28 : if (!thread) {
1847 0 : assert(false);
1848 : return;
1849 : }
1850 :
1851 28 : if (poller->thread != thread) {
1852 0 : wrong_thread(__func__, poller->name, poller->thread, thread);
1853 0 : return;
1854 : }
1855 :
1856 : /* If a poller is paused it has to be removed from the paused pollers
1857 : * list and put on the active list or timer tree depending on its
1858 : * period_ticks. If a poller is still in the process of being paused,
1859 : * we just need to flip its state back to waiting, as it's already on
1860 : * the appropriate list or tree.
1861 : */
1862 28 : switch (poller->state) {
1863 : case SPDK_POLLER_STATE_PAUSED:
1864 19 : TAILQ_REMOVE(&thread->paused_pollers, poller, tailq);
1865 19 : thread_insert_poller(thread, poller);
1866 : /* fallthrough */
1867 : case SPDK_POLLER_STATE_PAUSING:
1868 27 : poller->state = SPDK_POLLER_STATE_WAITING;
1869 27 : break;
1870 : case SPDK_POLLER_STATE_RUNNING:
1871 : case SPDK_POLLER_STATE_WAITING:
1872 1 : break;
1873 : default:
1874 0 : assert(false);
1875 : break;
1876 : }
1877 28 : }
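     : /* Editor's note (illustrative, not in the original source): pausing is
     : * deferred; the poller keeps its list/tree position until spdk_thread_poll()
     : * observes SPDK_POLLER_STATE_PAUSING and moves it to paused_pollers.
     : *
     : * spdk_poller_pause(p);   // takes effect at its next scheduled run
     : * spdk_poller_resume(p);  // back onto the active list or timer tree
     : */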
1878 :
1879 : const char *
1880 0 : spdk_poller_get_name(struct spdk_poller *poller)
1881 : {
1882 0 : return poller->name;
1883 : }
1884 :
1885 : uint64_t
1886 0 : spdk_poller_get_id(struct spdk_poller *poller)
1887 : {
1888 0 : return poller->id;
1889 : }
1890 :
1891 : const char *
1892 0 : spdk_poller_get_state_str(struct spdk_poller *poller)
1893 : {
1894 0 : switch (poller->state) {
1895 : case SPDK_POLLER_STATE_WAITING:
1896 0 : return "waiting";
1897 : case SPDK_POLLER_STATE_RUNNING:
1898 0 : return "running";
1899 : case SPDK_POLLER_STATE_UNREGISTERED:
1900 0 : return "unregistered";
1901 : case SPDK_POLLER_STATE_PAUSING:
1902 0 : return "pausing";
1903 : case SPDK_POLLER_STATE_PAUSED:
1904 0 : return "paused";
1905 : default:
1906 0 : return NULL;
1907 : }
1908 0 : }
1909 :
1910 : uint64_t
1911 0 : spdk_poller_get_period_ticks(struct spdk_poller *poller)
1912 : {
1913 0 : return poller->period_ticks;
1914 : }
1915 :
1916 : void
1917 0 : spdk_poller_get_stats(struct spdk_poller *poller, struct spdk_poller_stats *stats)
1918 : {
1919 0 : stats->run_count = poller->run_count;
1920 0 : stats->busy_count = poller->busy_count;
1921 0 : }
1922 :
1923 : struct spdk_poller *
1924 0 : spdk_thread_get_first_active_poller(struct spdk_thread *thread)
1925 : {
1926 0 : return TAILQ_FIRST(&thread->active_pollers);
1927 : }
1928 :
1929 : struct spdk_poller *
1930 0 : spdk_thread_get_next_active_poller(struct spdk_poller *prev)
1931 : {
1932 0 : return TAILQ_NEXT(prev, tailq);
1933 : }
1934 :
1935 : struct spdk_poller *
1936 0 : spdk_thread_get_first_timed_poller(struct spdk_thread *thread)
1937 : {
1938 0 : return RB_MIN(timed_pollers_tree, &thread->timed_pollers);
1939 : }
1940 :
1941 : struct spdk_poller *
1942 0 : spdk_thread_get_next_timed_poller(struct spdk_poller *prev)
1943 : {
1944 0 : return RB_NEXT(timed_pollers_tree, &thread->timed_pollers, prev);
1945 : }
1946 :
1947 : struct spdk_poller *
1948 0 : spdk_thread_get_first_paused_poller(struct spdk_thread *thread)
1949 : {
1950 0 : return TAILQ_FIRST(&thread->paused_pollers);
1951 : }
1952 :
1953 : struct spdk_poller *
1954 0 : spdk_thread_get_next_paused_poller(struct spdk_poller *prev)
1955 : {
1956 0 : return TAILQ_NEXT(prev, tailq);
1957 : }
1958 :
1959 : struct spdk_io_channel *
1960 0 : spdk_thread_get_first_io_channel(struct spdk_thread *thread)
1961 : {
1962 0 : return RB_MIN(io_channel_tree, &thread->io_channels);
1963 : }
1964 :
1965 : struct spdk_io_channel *
1966 0 : spdk_thread_get_next_io_channel(struct spdk_io_channel *prev)
1967 : {
1968 0 : return RB_NEXT(io_channel_tree, &thread->io_channels, prev);
1969 : }
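     : /* Editor's note: the first/next accessors above enable iteration over a
     : * thread's pollers and channels and must be called on the owning thread.
     : * The "&thread->..." head argument to RB_NEXT() is discarded by the BSD
     : * tree.h macro, which is why the otherwise-undeclared 'thread' compiles.
     : * Sketch (assuming a valid struct spdk_thread *t):
     : *
     : * struct spdk_poller *p;
     : * for (p = spdk_thread_get_first_active_poller(t); p != NULL;
     : *      p = spdk_thread_get_next_active_poller(p)) {
     : *         printf("%s\n", spdk_poller_get_name(p));
     : * }
     : */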
1970 :
1971 : struct call_thread {
1972 : struct spdk_thread *cur_thread;
1973 : spdk_msg_fn fn;
1974 : void *ctx;
1975 :
1976 : struct spdk_thread *orig_thread;
1977 : spdk_msg_fn cpl;
1978 : };
1979 :
1980 : static void
1981 2 : _back_to_orig_thread(void *ctx)
1982 : {
1983 2 : struct call_thread *ct = ctx;
1984 :
1985 2 : assert(ct->orig_thread->for_each_count > 0);
1986 2 : ct->orig_thread->for_each_count--;
1987 :
1988 2 : ct->cpl(ct->ctx);
1989 2 : free(ctx);
1990 2 : }
1991 :
1992 : static void
1993 6 : _on_thread(void *ctx)
1994 : {
1995 6 : struct call_thread *ct = ctx;
1996 6 : int rc __attribute__((unused));
1997 :
1998 6 : ct->fn(ct->ctx);
1999 :
2000 6 : pthread_mutex_lock(&g_devlist_mutex);
2001 6 : ct->cur_thread = TAILQ_NEXT(ct->cur_thread, tailq);
2002 6 : while (ct->cur_thread && ct->cur_thread->state != SPDK_THREAD_STATE_RUNNING) {
2003 0 : SPDK_DEBUGLOG(thread, "thread %s is not running but still not destroyed.\n",
2004 : ct->cur_thread->name);
2005 0 : ct->cur_thread = TAILQ_NEXT(ct->cur_thread, tailq);
2006 : }
2007 6 : pthread_mutex_unlock(&g_devlist_mutex);
2008 :
2009 6 : if (!ct->cur_thread) {
2010 2 : SPDK_DEBUGLOG(thread, "Completed thread iteration\n");
2011 :
2012 2 : rc = spdk_thread_send_msg(ct->orig_thread, _back_to_orig_thread, ctx);
2013 2 : } else {
2014 4 : SPDK_DEBUGLOG(thread, "Continuing thread iteration to %s\n",
2015 : ct->cur_thread->name);
2016 :
2017 4 : rc = spdk_thread_send_msg(ct->cur_thread, _on_thread, ctx);
2018 : }
2019 6 : assert(rc == 0);
2020 6 : }
2021 :
2022 : void
2023 2 : spdk_for_each_thread(spdk_msg_fn fn, void *ctx, spdk_msg_fn cpl)
2024 : {
2025 2 : struct call_thread *ct;
2026 2 : struct spdk_thread *thread;
2027 2 : int rc __attribute__((unused));
2028 :
2029 2 : ct = calloc(1, sizeof(*ct));
2030 2 : if (!ct) {
2031 0 : SPDK_ERRLOG("Unable to perform thread iteration\n");
2032 0 : cpl(ctx);
2033 0 : return;
2034 : }
2035 :
2036 2 : ct->fn = fn;
2037 2 : ct->ctx = ctx;
2038 2 : ct->cpl = cpl;
2039 :
2040 2 : thread = _get_thread();
2041 2 : if (!thread) {
2042 0 : SPDK_ERRLOG("No thread allocated\n");
2043 0 : free(ct);
2044 0 : cpl(ctx);
2045 0 : return;
2046 : }
2047 2 : ct->orig_thread = thread;
2048 :
2049 2 : ct->orig_thread->for_each_count++;
2050 :
2051 2 : pthread_mutex_lock(&g_devlist_mutex);
2052 2 : ct->cur_thread = TAILQ_FIRST(&g_threads);
2053 2 : pthread_mutex_unlock(&g_devlist_mutex);
2054 :
2055 2 : SPDK_DEBUGLOG(thread, "Starting thread iteration from %s\n",
2056 : ct->orig_thread->name);
2057 :
2058 2 : rc = spdk_thread_send_msg(ct->cur_thread, _on_thread, ct);
2059 2 : assert(rc == 0);
2060 2 : }
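     : /* Editor's sketch (not in the original source): fn runs once on every
     : * running SPDK thread in turn, then cpl runs back on the calling thread.
     : *
     : * static void per_thread(void *ctx) { ... }
     : * static void done(void *ctx) { ... }
     : * ...
     : * spdk_for_each_thread(per_thread, my_ctx, done);
     : */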
2061 :
2062 : static inline void
2063 0 : poller_set_interrupt_mode(struct spdk_poller *poller, bool interrupt_mode)
2064 : {
2065 0 : if (poller->state == SPDK_POLLER_STATE_UNREGISTERED) {
2066 0 : return;
2067 : }
2068 :
2069 0 : if (!poller->set_intr_cb_fn) {
2070 0 : SPDK_ERRLOG("Poller(%s) doesn't support setting interrupt mode.\n", poller->name);
2071 0 : assert(false);
2072 : return;
2073 : }
2074 :
2075 0 : poller->set_intr_cb_fn(poller, poller->set_intr_cb_arg, interrupt_mode);
2076 0 : }
2077 :
2078 : void
2079 0 : spdk_thread_set_interrupt_mode(bool enable_interrupt)
2080 : {
2081 0 : struct spdk_thread *thread = _get_thread();
2082 0 : struct spdk_poller *poller, *tmp;
2083 :
2084 0 : assert(thread);
2085 0 : assert(spdk_interrupt_mode_is_enabled());
2086 :
2087 0 : SPDK_NOTICELOG("Set spdk_thread (%s) to %s mode from %s mode.\n",
2088 : thread->name, enable_interrupt ? "intr" : "poll",
2089 : thread->in_interrupt ? "intr" : "poll");
2090 :
2091 0 : if (thread->in_interrupt == enable_interrupt) {
2092 0 : return;
2093 : }
2094 :
2095 : /* Set pollers to expected mode */
2096 0 : RB_FOREACH_SAFE(poller, timed_pollers_tree, &thread->timed_pollers, tmp) {
2097 0 : poller_set_interrupt_mode(poller, enable_interrupt);
2098 0 : }
2099 0 : TAILQ_FOREACH_SAFE(poller, &thread->active_pollers, tailq, tmp) {
2100 0 : poller_set_interrupt_mode(poller, enable_interrupt);
2101 0 : }
2102 : /* Paused pollers are updated as well, so they resume in the expected mode */
2103 0 : TAILQ_FOREACH_SAFE(poller, &thread->paused_pollers, tailq, tmp) {
2104 0 : poller_set_interrupt_mode(poller, enable_interrupt);
2105 0 : }
2106 :
2107 0 : thread->in_interrupt = enable_interrupt;
2108 0 : return;
2109 0 : }
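     : /* Editor's note: this operates on the calling thread and is only valid
     : * when the library runs with interrupt mode enabled. To flip another
     : * thread, send it a message (a sketch, with a hypothetical helper name):
     : *
     : * static void set_intr(void *ctx) { spdk_thread_set_interrupt_mode(true); }
     : * ...
     : * spdk_thread_send_msg(thread, set_intr, NULL);
     : */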
2110 :
2111 : static struct io_device *
2112 7183 : io_device_get(void *io_device)
2113 : {
2114 7183 : struct io_device find = {};
2115 :
2116 7183 : find.io_device = io_device;
2117 14366 : return RB_FIND(io_device_tree, &g_io_devices, &find);
2118 7183 : }
2119 :
2120 : void
2121 1974 : spdk_io_device_register(void *io_device, spdk_io_channel_create_cb create_cb,
2122 : spdk_io_channel_destroy_cb destroy_cb, uint32_t ctx_size,
2123 : const char *name)
2124 : {
2125 1974 : struct io_device *dev, *tmp;
2126 1974 : struct spdk_thread *thread;
2127 :
2128 1974 : assert(io_device != NULL);
2129 1974 : assert(create_cb != NULL);
2130 1974 : assert(destroy_cb != NULL);
2131 :
2132 1974 : thread = spdk_get_thread();
2133 1974 : if (!thread) {
2134 0 : SPDK_ERRLOG("called from non-SPDK thread\n");
2135 0 : assert(false);
2136 : return;
2137 : }
2138 :
2139 1974 : dev = calloc(1, sizeof(struct io_device));
2140 1974 : if (dev == NULL) {
2141 0 : SPDK_ERRLOG("could not allocate io_device\n");
2142 0 : return;
2143 : }
2144 :
2145 1974 : dev->io_device = io_device;
2146 1974 : if (name) {
2147 1331 : snprintf(dev->name, sizeof(dev->name), "%s", name);
2148 1331 : } else {
2149 643 : snprintf(dev->name, sizeof(dev->name), "%p", dev);
2150 : }
2151 1974 : dev->create_cb = create_cb;
2152 1974 : dev->destroy_cb = destroy_cb;
2153 1974 : dev->unregister_cb = NULL;
2154 1974 : dev->ctx_size = ctx_size;
2155 1974 : dev->for_each_count = 0;
2156 1974 : dev->unregistered = false;
2157 1974 : dev->refcnt = 0;
2158 :
2159 1974 : SPDK_DEBUGLOG(thread, "Registering io_device %s (%p) on thread %s\n",
2160 : dev->name, dev->io_device, thread->name);
2161 :
2162 1974 : pthread_mutex_lock(&g_devlist_mutex);
2163 1974 : tmp = RB_INSERT(io_device_tree, &g_io_devices, dev);
2164 1974 : if (tmp != NULL) {
2165 2 : SPDK_ERRLOG("io_device %p already registered (old:%s new:%s)\n",
2166 : io_device, tmp->name, dev->name);
2167 2 : free(dev);
2168 2 : }
2169 :
2170 1974 : pthread_mutex_unlock(&g_devlist_mutex);
2171 1974 : }
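     : /* Usage sketch (editor's illustration; names are hypothetical): an
     : * io_device pairs a create and a destroy callback with per-channel context
     : * of ctx_size bytes; create_cb receives that context buffer, which sits
     : * just past the spdk_io_channel header.
     : *
     : * static int create_cb(void *io_device, void *ctx_buf) { return 0; }
     : * static void destroy_cb(void *io_device, void *ctx_buf) { }
     : * ...
     : * spdk_io_device_register(&g_dev, create_cb, destroy_cb,
     : *                         sizeof(struct my_channel_ctx), "my_dev");
     : */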
2172 :
2173 : static void
2174 1783 : _finish_unregister(void *arg)
2175 : {
2176 1783 : struct io_device *dev = arg;
2177 1783 : struct spdk_thread *thread;
2178 :
2179 1783 : thread = spdk_get_thread();
2180 1783 : assert(thread == dev->unregister_thread);
2181 :
2182 1783 : SPDK_DEBUGLOG(thread, "Finishing unregistration of io_device %s (%p) on thread %s\n",
2183 : dev->name, dev->io_device, thread->name);
2184 :
2185 1783 : assert(thread->pending_unregister_count > 0);
2186 1783 : thread->pending_unregister_count--;
2187 :
2188 1783 : dev->unregister_cb(dev->io_device);
2189 1783 : free(dev);
2190 1783 : }
2191 :
2192 : static void
2193 1972 : io_device_free(struct io_device *dev)
2194 : {
2195 1972 : int rc __attribute__((unused));
2196 :
2197 1972 : if (dev->unregister_cb == NULL) {
2198 189 : free(dev);
2199 189 : } else {
2200 1783 : assert(dev->unregister_thread != NULL);
2201 1783 : SPDK_DEBUGLOG(thread, "io_device %s (%p) needs to unregister from thread %s\n",
2202 : dev->name, dev->io_device, dev->unregister_thread->name);
2203 1783 : rc = spdk_thread_send_msg(dev->unregister_thread, _finish_unregister, dev);
2204 1783 : assert(rc == 0);
2205 : }
2206 1972 : }
2207 :
2208 : void
2209 1973 : spdk_io_device_unregister(void *io_device, spdk_io_device_unregister_cb unregister_cb)
2210 : {
2211 1973 : struct io_device *dev;
2212 1973 : uint32_t refcnt;
2213 1973 : struct spdk_thread *thread;
2214 :
2215 1973 : thread = spdk_get_thread();
2216 1973 : if (!thread) {
2217 0 : SPDK_ERRLOG("called from non-SPDK thread\n");
2218 0 : assert(false);
2219 : return;
2220 : }
2221 :
2222 1973 : pthread_mutex_lock(&g_devlist_mutex);
2223 1973 : dev = io_device_get(io_device);
2224 1973 : if (!dev) {
2225 0 : SPDK_ERRLOG("io_device %p not found\n", io_device);
2226 0 : assert(false);
2227 : pthread_mutex_unlock(&g_devlist_mutex);
2228 : return;
2229 : }
2230 :
2231 : /* The for_each_count check differentiates the user attempting to unregister the
2232 : * device a second time, from the internal call to this function that occurs
2233 : * after the for_each_count reaches 0.
2234 : */
2235 1973 : if (dev->pending_unregister && dev->for_each_count > 0) {
2236 0 : SPDK_ERRLOG("io_device %p already has a pending unregister\n", io_device);
2237 0 : assert(false);
2238 : pthread_mutex_unlock(&g_devlist_mutex);
2239 : return;
2240 : }
2241 :
2242 1973 : dev->unregister_cb = unregister_cb;
2243 1973 : dev->unregister_thread = thread;
2244 :
2245 1973 : if (dev->for_each_count > 0) {
2246 1 : SPDK_WARNLOG("io_device %s (%p) has %u for_each calls outstanding\n",
2247 : dev->name, io_device, dev->for_each_count);
2248 1 : dev->pending_unregister = true;
2249 1 : pthread_mutex_unlock(&g_devlist_mutex);
2250 1 : return;
2251 : }
2252 :
2253 1972 : dev->unregistered = true;
2254 1972 : RB_REMOVE(io_device_tree, &g_io_devices, dev);
2255 1972 : refcnt = dev->refcnt;
2256 1972 : pthread_mutex_unlock(&g_devlist_mutex);
2257 :
2258 1972 : SPDK_DEBUGLOG(thread, "Unregistering io_device %s (%p) from thread %s\n",
2259 : dev->name, dev->io_device, thread->name);
2260 :
2261 1972 : if (unregister_cb) {
2262 1783 : thread->pending_unregister_count++;
2263 1783 : }
2264 :
2265 1972 : if (refcnt > 0) {
2266 : /* defer deletion */
2267 776 : return;
2268 : }
2269 :
2270 1196 : io_device_free(dev);
2271 1973 : }
2272 :
2273 : const char *
2274 0 : spdk_io_device_get_name(struct io_device *dev)
2275 : {
2276 0 : return dev->name;
2277 : }
2278 :
2279 : static struct spdk_io_channel *
2280 8272 : thread_get_io_channel(struct spdk_thread *thread, struct io_device *dev)
2281 : {
2282 8272 : struct spdk_io_channel find = {};
2283 :
2284 8272 : find.dev = dev;
2285 8272 : return RB_FIND(io_channel_tree, &thread->io_channels, &find);
2286 8272 : }
2287 :
2288 : struct spdk_io_channel *
2289 3827 : spdk_get_io_channel(void *io_device)
2290 : {
2291 3827 : struct spdk_io_channel *ch;
2292 3827 : struct spdk_thread *thread;
2293 3827 : struct io_device *dev;
2294 3827 : int rc;
2295 :
2296 3827 : pthread_mutex_lock(&g_devlist_mutex);
2297 3827 : dev = io_device_get(io_device);
2298 3827 : if (dev == NULL) {
2299 1 : SPDK_ERRLOG("could not find io_device %p\n", io_device);
2300 1 : pthread_mutex_unlock(&g_devlist_mutex);
2301 1 : return NULL;
2302 : }
2303 :
2304 3826 : thread = _get_thread();
2305 3826 : if (!thread) {
2306 0 : SPDK_ERRLOG("No thread allocated\n");
2307 0 : pthread_mutex_unlock(&g_devlist_mutex);
2308 0 : return NULL;
2309 : }
2310 :
2311 3826 : if (spdk_unlikely(thread->state == SPDK_THREAD_STATE_EXITED)) {
2312 0 : SPDK_ERRLOG("Thread %s is marked as exited\n", thread->name);
2313 0 : pthread_mutex_unlock(&g_devlist_mutex);
2314 0 : return NULL;
2315 : }
2316 :
2317 3826 : ch = thread_get_io_channel(thread, dev);
2318 3826 : if (ch != NULL) {
2319 1478 : ch->ref++;
2320 :
2321 1478 : SPDK_DEBUGLOG(thread, "Get io_channel %p for io_device %s (%p) on thread %s refcnt %u\n",
2322 : ch, dev->name, dev->io_device, thread->name, ch->ref);
2323 :
2324 : /*
2325 : * An I/O channel already exists for this device on this
2326 : * thread, so return it.
2327 : */
2328 1478 : pthread_mutex_unlock(&g_devlist_mutex);
2329 1478 : spdk_trace_record(TRACE_THREAD_IOCH_GET, 0, 0,
2330 : (uint64_t)spdk_io_channel_get_ctx(ch), ch->ref);
2331 1478 : return ch;
2332 : }
2333 :
2334 2348 : ch = calloc(1, sizeof(*ch) + dev->ctx_size);
2335 2348 : if (ch == NULL) {
2336 0 : SPDK_ERRLOG("could not calloc spdk_io_channel\n");
2337 0 : pthread_mutex_unlock(&g_devlist_mutex);
2338 0 : return NULL;
2339 : }
2340 :
2341 2348 : ch->dev = dev;
2342 2348 : ch->destroy_cb = dev->destroy_cb;
2343 2348 : ch->thread = thread;
2344 2348 : ch->ref = 1;
2345 2348 : ch->destroy_ref = 0;
2346 2348 : RB_INSERT(io_channel_tree, &thread->io_channels, ch);
2347 :
2348 2348 : SPDK_DEBUGLOG(thread, "Get io_channel %p for io_device %s (%p) on thread %s refcnt %u\n",
2349 : ch, dev->name, dev->io_device, thread->name, ch->ref);
2350 :
2351 2348 : dev->refcnt++;
2352 :
2353 2348 : pthread_mutex_unlock(&g_devlist_mutex);
2354 :
2355 2348 : rc = dev->create_cb(io_device, (uint8_t *)ch + sizeof(*ch));
2356 2348 : if (rc != 0) {
2357 3 : pthread_mutex_lock(&g_devlist_mutex);
2358 3 : RB_REMOVE(io_channel_tree, &ch->thread->io_channels, ch);
2359 3 : dev->refcnt--;
2360 3 : free(ch);
2361 3 : SPDK_ERRLOG("could not create io_channel for io_device %s (%p): %s (rc=%d)\n",
2362 : dev->name, io_device, spdk_strerror(-rc), rc);
2363 3 : pthread_mutex_unlock(&g_devlist_mutex);
2364 3 : return NULL;
2365 : }
2366 :
2367 2345 : spdk_trace_record(TRACE_THREAD_IOCH_GET, 0, 0, (uint64_t)spdk_io_channel_get_ctx(ch), 1);
2368 2345 : return ch;
2369 3827 : }
2370 :
2371 : static void
2372 2349 : put_io_channel(void *arg)
2373 : {
2374 2349 : struct spdk_io_channel *ch = arg;
2375 2349 : bool do_remove_dev = true;
2376 2349 : struct spdk_thread *thread;
2377 :
2378 2349 : thread = spdk_get_thread();
2379 2349 : if (!thread) {
2380 0 : SPDK_ERRLOG("called from non-SPDK thread\n");
2381 0 : assert(false);
2382 : return;
2383 : }
2384 :
2385 2349 : SPDK_DEBUGLOG(thread,
2386 : "Releasing io_channel %p for io_device %s (%p) on thread %s\n",
2387 : ch, ch->dev->name, ch->dev->io_device, thread->name);
2388 :
2389 2349 : assert(ch->thread == thread);
2390 :
2391 2349 : ch->destroy_ref--;
2392 :
2393 2349 : if (ch->ref > 0 || ch->destroy_ref > 0) {
2394 : /*
2395 : * Another reference to the associated io_device was requested
2396 : * after this message was sent but before it had a chance to
2397 : * execute.
2398 : */
2399 4 : return;
2400 : }
2401 :
2402 2345 : pthread_mutex_lock(&g_devlist_mutex);
2403 2345 : RB_REMOVE(io_channel_tree, &ch->thread->io_channels, ch);
2404 2345 : pthread_mutex_unlock(&g_devlist_mutex);
2405 :
2406 : /* Don't hold the devlist mutex while the destroy_cb is called. */
2407 2345 : ch->destroy_cb(ch->dev->io_device, spdk_io_channel_get_ctx(ch));
2408 :
2409 2345 : pthread_mutex_lock(&g_devlist_mutex);
2410 2345 : ch->dev->refcnt--;
2411 :
2412 2345 : if (!ch->dev->unregistered) {
2413 1563 : do_remove_dev = false;
2414 1563 : }
2415 :
2416 2345 : if (ch->dev->refcnt > 0) {
2417 133 : do_remove_dev = false;
2418 133 : }
2419 :
2420 2345 : pthread_mutex_unlock(&g_devlist_mutex);
2421 :
2422 2345 : if (do_remove_dev) {
2423 776 : io_device_free(ch->dev);
2424 776 : }
2425 2345 : free(ch);
2426 2349 : }
2427 :
2428 : void
2429 3824 : spdk_put_io_channel(struct spdk_io_channel *ch)
2430 : {
2431 3824 : struct spdk_thread *thread;
2432 3824 : int rc __attribute__((unused));
2433 :
2434 3824 : spdk_trace_record(TRACE_THREAD_IOCH_PUT, 0, 0,
2435 : (uint64_t)spdk_io_channel_get_ctx(ch), ch->ref);
2436 :
2437 3824 : thread = spdk_get_thread();
2438 3824 : if (!thread) {
2439 0 : SPDK_ERRLOG("called from non-SPDK thread\n");
2440 0 : assert(false);
2441 : return;
2442 : }
2443 :
2444 3824 : if (ch->thread != thread) {
2445 0 : wrong_thread(__func__, "ch", ch->thread, thread);
2446 0 : return;
2447 : }
2448 :
2449 3824 : SPDK_DEBUGLOG(thread,
2450 : "Putting io_channel %p for io_device %s (%p) on thread %s refcnt %u\n",
2451 : ch, ch->dev->name, ch->dev->io_device, thread->name, ch->ref);
2452 :
2453 3824 : ch->ref--;
2454 :
2455 3824 : if (ch->ref == 0) {
2456 2349 : ch->destroy_ref++;
2457 2349 : rc = spdk_thread_send_msg(thread, put_io_channel, ch);
2458 2349 : assert(rc == 0);
2459 2349 : }
2460 3824 : }
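     : /* Editor's sketch (not in the original source; g_dev and my_channel_ctx
     : * are hypothetical): channels are per-thread and reference counted. The
     : * first get on a thread calls create_cb, later gets return the cached
     : * channel, and the final put defers destroy_cb to a message so it never
     : * runs inside the caller's stack frame.
     : *
     : * struct spdk_io_channel *ch = spdk_get_io_channel(&g_dev);
     : * struct my_channel_ctx *ctx = spdk_io_channel_get_ctx(ch);
     : * ...
     : * spdk_put_io_channel(ch);
     : */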
2461 :
2462 : struct spdk_io_channel *
2463 444132 : spdk_io_channel_from_ctx(void *ctx)
2464 : {
2465 444132 : return (struct spdk_io_channel *)((uint8_t *)ctx - sizeof(struct spdk_io_channel));
2466 : }
2467 :
2468 : struct spdk_thread *
2469 1958 : spdk_io_channel_get_thread(struct spdk_io_channel *ch)
2470 : {
2471 1958 : return ch->thread;
2472 : }
2473 :
2474 : void *
2475 442023 : spdk_io_channel_get_io_device(struct spdk_io_channel *ch)
2476 : {
2477 442023 : return ch->dev->io_device;
2478 : }
2479 :
2480 : const char *
2481 0 : spdk_io_channel_get_io_device_name(struct spdk_io_channel *ch)
2482 : {
2483 0 : return spdk_io_device_get_name(ch->dev);
2484 : }
2485 :
2486 : int
2487 0 : spdk_io_channel_get_ref_count(struct spdk_io_channel *ch)
2488 : {
2489 0 : return ch->ref;
2490 : }
2491 :
2492 : struct spdk_io_channel_iter {
2493 : void *io_device;
2494 : struct io_device *dev;
2495 : spdk_channel_msg fn;
2496 : int status;
2497 : void *ctx;
2498 : struct spdk_io_channel *ch;
2499 :
2500 : struct spdk_thread *cur_thread;
2501 :
2502 : struct spdk_thread *orig_thread;
2503 : spdk_channel_for_each_cpl cpl;
2504 : };
2505 :
2506 : void *
2507 480 : spdk_io_channel_iter_get_io_device(struct spdk_io_channel_iter *i)
2508 : {
2509 480 : return i->io_device;
2510 : }
2511 :
2512 : struct spdk_io_channel *
2513 923 : spdk_io_channel_iter_get_channel(struct spdk_io_channel_iter *i)
2514 : {
2515 923 : return i->ch;
2516 : }
2517 :
2518 : void *
2519 2110 : spdk_io_channel_iter_get_ctx(struct spdk_io_channel_iter *i)
2520 : {
2521 2110 : return i->ctx;
2522 : }
2523 :
2524 : static void
2525 1380 : _call_completion(void *ctx)
2526 : {
2527 1380 : struct spdk_io_channel_iter *i = ctx;
2528 :
2529 1380 : assert(i->orig_thread->for_each_count > 0);
2530 1380 : i->orig_thread->for_each_count--;
2531 :
2532 1380 : if (i->cpl != NULL) {
2533 1380 : i->cpl(i, i->status);
2534 1380 : }
2535 1380 : free(i);
2536 1380 : }
2537 :
2538 : static void
2539 1412 : _call_channel(void *ctx)
2540 : {
2541 1412 : struct spdk_io_channel_iter *i = ctx;
2542 1412 : struct spdk_io_channel *ch;
2543 :
2544 : /*
2545 : * It is possible that the channel was deleted before this
2546 : * message had a chance to execute. If so, skip calling
2547 : * the fn() on this thread.
2548 : */
2549 1412 : pthread_mutex_lock(&g_devlist_mutex);
2550 1412 : ch = thread_get_io_channel(i->cur_thread, i->dev);
2551 1412 : pthread_mutex_unlock(&g_devlist_mutex);
2552 :
2553 1412 : if (ch) {
2554 1372 : i->fn(i);
2555 1372 : } else {
2556 40 : spdk_for_each_channel_continue(i, 0);
2557 : }
2558 1412 : }
2559 :
2560 : void
2561 1380 : spdk_for_each_channel(void *io_device, spdk_channel_msg fn, void *ctx,
2562 : spdk_channel_for_each_cpl cpl)
2563 : {
2564 1380 : struct spdk_thread *thread;
2565 1380 : struct spdk_io_channel *ch;
2566 1380 : struct spdk_io_channel_iter *i;
2567 1380 : int rc __attribute__((unused));
2568 :
2569 1380 : i = calloc(1, sizeof(*i));
2570 1380 : if (!i) {
2571 0 : SPDK_ERRLOG("Unable to allocate iterator\n");
2572 0 : assert(false);
2573 : return;
2574 : }
2575 :
2576 1380 : i->io_device = io_device;
2577 1380 : i->fn = fn;
2578 1380 : i->ctx = ctx;
2579 1380 : i->cpl = cpl;
2580 1380 : i->orig_thread = _get_thread();
2581 :
2582 1380 : i->orig_thread->for_each_count++;
2583 :
2584 1380 : pthread_mutex_lock(&g_devlist_mutex);
2585 1380 : i->dev = io_device_get(io_device);
2586 1380 : if (i->dev == NULL) {
2587 0 : SPDK_ERRLOG("could not find io_device %p\n", io_device);
2588 0 : assert(false);
2589 : i->status = -ENODEV;
2590 : goto end;
2591 : }
2592 :
2593 : /* Do not allow new for_each operations if an unregister is already pending,
2594 : * waiting for outstanding for_each operations to complete.
2595 : */
2596 1380 : if (i->dev->pending_unregister) {
2597 0 : SPDK_ERRLOG("io_device %p has a pending unregister\n", io_device);
2598 0 : i->status = -ENODEV;
2599 0 : goto end;
2600 : }
2601 :
2602 1690 : TAILQ_FOREACH(thread, &g_threads, tailq) {
2603 1557 : ch = thread_get_io_channel(thread, i->dev);
2604 1557 : if (ch != NULL) {
2605 1247 : ch->dev->for_each_count++;
2606 1247 : i->cur_thread = thread;
2607 1247 : i->ch = ch;
2608 1247 : pthread_mutex_unlock(&g_devlist_mutex);
2609 1247 : rc = spdk_thread_send_msg(thread, _call_channel, i);
2610 1247 : assert(rc == 0);
2611 1247 : return;
2612 : }
2613 443 : }
2614 :
2615 : end:
2616 133 : pthread_mutex_unlock(&g_devlist_mutex);
2617 :
2618 133 : rc = spdk_thread_send_msg(i->orig_thread, _call_completion, i);
2619 133 : assert(rc == 0);
2620 1380 : }
2621 :
2622 : static void
2623 1 : __pending_unregister(void *arg)
2624 : {
2625 1 : struct io_device *dev = arg;
2626 :
2627 1 : assert(dev->pending_unregister);
2628 1 : assert(dev->for_each_count == 0);
2629 1 : spdk_io_device_unregister(dev->io_device, dev->unregister_cb);
2630 1 : }
2631 :
2632 : void
2633 1412 : spdk_for_each_channel_continue(struct spdk_io_channel_iter *i, int status)
2634 : {
2635 1412 : struct spdk_thread *thread;
2636 1412 : struct spdk_io_channel *ch;
2637 1412 : struct io_device *dev;
2638 1412 : int rc __attribute__((unused));
2639 :
2640 1412 : assert(i->cur_thread == spdk_get_thread());
2641 :
2642 1412 : i->status = status;
2643 :
2644 1412 : pthread_mutex_lock(&g_devlist_mutex);
2645 1412 : dev = i->dev;
2646 1412 : if (status) {
2647 11 : goto end;
2648 : }
2649 :
2650 1401 : thread = TAILQ_NEXT(i->cur_thread, tailq);
2651 2713 : while (thread) {
2652 1477 : ch = thread_get_io_channel(thread, dev);
2653 1477 : if (ch != NULL) {
2654 165 : i->cur_thread = thread;
2655 165 : i->ch = ch;
2656 165 : pthread_mutex_unlock(&g_devlist_mutex);
2657 165 : rc = spdk_thread_send_msg(thread, _call_channel, i);
2658 165 : assert(rc == 0);
2659 165 : return;
2660 : }
2661 1312 : thread = TAILQ_NEXT(thread, tailq);
2662 : }
2663 :
2664 : end:
2665 1247 : dev->for_each_count--;
2666 1247 : i->ch = NULL;
2667 1247 : pthread_mutex_unlock(&g_devlist_mutex);
2668 :
2669 1247 : rc = spdk_thread_send_msg(i->orig_thread, _call_completion, i);
2670 1247 : assert(rc == 0);
2671 :
2672 1247 : pthread_mutex_lock(&g_devlist_mutex);
2673 1247 : if (dev->pending_unregister && dev->for_each_count == 0) {
2674 1 : rc = spdk_thread_send_msg(dev->unregister_thread, __pending_unregister, dev);
2675 1 : assert(rc == 0);
2676 1 : }
2677 1247 : pthread_mutex_unlock(&g_devlist_mutex);
2678 1412 : }
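     : /* Editor's sketch (not in the original source; g_dev is hypothetical):
     : * each per-channel callback must finish by calling
     : * spdk_for_each_channel_continue(); a non-zero status short-circuits the
     : * walk and is reported to the completion callback.
     : *
     : * static void on_ch(struct spdk_io_channel_iter *i) {
     : *         struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
     : *         spdk_for_each_channel_continue(i, 0);
     : * }
     : * static void on_done(struct spdk_io_channel_iter *i, int status) { }
     : * ...
     : * spdk_for_each_channel(&g_dev, on_ch, NULL, on_done);
     : */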
2679 :
2680 : static void
2681 0 : thread_interrupt_destroy(struct spdk_thread *thread)
2682 : {
2683 0 : struct spdk_fd_group *fgrp = thread->fgrp;
2684 :
2685 0 : SPDK_INFOLOG(thread, "destroy fgrp for thread (%s)\n", thread->name);
2686 :
2687 0 : if (thread->msg_fd < 0) {
2688 0 : return;
2689 : }
2690 :
2691 0 : spdk_fd_group_remove(fgrp, thread->msg_fd);
2692 0 : close(thread->msg_fd);
2693 0 : thread->msg_fd = -1;
2694 :
2695 0 : spdk_fd_group_destroy(fgrp);
2696 0 : thread->fgrp = NULL;
2697 0 : }
2698 :
2699 : #ifdef __linux__
2700 : static int
2701 0 : thread_interrupt_msg_process(void *arg)
2702 : {
2703 0 : struct spdk_thread *thread = arg;
2704 0 : struct spdk_thread *orig_thread;
2705 0 : uint32_t msg_count;
2706 0 : spdk_msg_fn critical_msg;
2707 0 : int rc = 0;
2708 0 : uint64_t notify = 1;
2709 :
2710 0 : assert(spdk_interrupt_mode_is_enabled());
2711 :
2712 0 : orig_thread = spdk_get_thread();
2713 0 : spdk_set_thread(thread);
2714 :
2715 : /* There may be a race between this acknowledgement and another producer's
2716 : * msg_notify, so acknowledge the eventfd first and only then drain the
2717 : * message queue. This avoids missing a msg notification.
2718 : */
2719 0 : rc = read(thread->msg_fd, &notify, sizeof(notify));
2720 0 : if (rc < 0 && errno != EAGAIN) {
2721 0 : SPDK_ERRLOG("failed to acknowledge msg event: %s.\n", spdk_strerror(errno));
2722 0 : }
2723 :
2724 0 : critical_msg = thread->critical_msg;
2725 0 : if (spdk_unlikely(critical_msg != NULL)) {
2726 0 : critical_msg(NULL);
2727 0 : thread->critical_msg = NULL;
2728 0 : rc = 1;
2729 0 : }
2730 :
2731 0 : msg_count = msg_queue_run_batch(thread, 0);
2732 0 : if (msg_count) {
2733 0 : rc = 1;
2734 0 : }
2735 :
2736 0 : SPIN_ASSERT(thread->lock_count == 0, SPIN_ERR_HOLD_DURING_SWITCH);
2737 0 : if (spdk_unlikely(!thread->in_interrupt)) {
2738 : /* The thread transitioned to poll mode while handling one of the messages
2739 : * above. Drain msg_fd, since thread messages will be polled directly in
2740 : * poll mode. */
2741 0 : rc = read(thread->msg_fd, &notify, sizeof(notify));
2742 0 : if (rc < 0 && errno != EAGAIN) {
2743 0 : SPDK_ERRLOG("failed to acknowledge msg queue: %s.\n", spdk_strerror(errno));
2744 0 : }
2745 0 : }
2746 :
2747 0 : spdk_set_thread(orig_thread);
2748 0 : return rc;
2749 0 : }
2750 :
2751 : static int
2752 0 : thread_interrupt_create(struct spdk_thread *thread)
2753 : {
2754 0 : int rc;
2755 :
2756 0 : SPDK_INFOLOG(thread, "Create fgrp for thread (%s)\n", thread->name);
2757 :
2758 0 : rc = spdk_fd_group_create(&thread->fgrp);
2759 0 : if (rc) {
2760 0 : return rc;
2761 : }
2762 :
2763 0 : thread->msg_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
2764 0 : if (thread->msg_fd < 0) {
2765 0 : rc = -errno;
2766 0 : spdk_fd_group_destroy(thread->fgrp);
2767 0 : thread->fgrp = NULL;
2768 :
2769 0 : return rc;
2770 : }
2771 :
2772 0 : return SPDK_FD_GROUP_ADD(thread->fgrp, thread->msg_fd,
2773 : thread_interrupt_msg_process, thread);
2774 0 : }
2775 : #else
2776 : static int
2777 : thread_interrupt_create(struct spdk_thread *thread)
2778 : {
2779 : return -ENOTSUP;
2780 : }
2781 : #endif
2782 :
2783 : static int
2784 0 : _interrupt_wrapper(void *ctx)
2785 : {
2786 0 : struct spdk_interrupt *intr = ctx;
2787 0 : struct spdk_thread *orig_thread, *thread;
2788 0 : int rc;
2789 :
2790 0 : orig_thread = spdk_get_thread();
2791 0 : thread = intr->thread;
2792 :
2793 0 : spdk_set_thread(thread);
2794 :
2795 : SPDK_DTRACE_PROBE4(interrupt_fd_process, intr->name, intr->efd,
2796 : intr->fn, intr->arg);
2797 :
2798 0 : rc = intr->fn(intr->arg);
2799 :
2800 0 : SPIN_ASSERT(thread->lock_count == 0, SPIN_ERR_HOLD_DURING_SWITCH);
2801 :
2802 0 : spdk_set_thread(orig_thread);
2803 :
2804 0 : return rc;
2805 0 : }
2806 :
2807 : struct spdk_interrupt *
2808 0 : spdk_interrupt_register(int efd, spdk_interrupt_fn fn,
2809 : void *arg, const char *name)
2810 : {
2811 0 : struct spdk_thread *thread;
2812 0 : struct spdk_interrupt *intr;
2813 0 : int ret;
2814 :
2815 0 : thread = spdk_get_thread();
2816 0 : if (!thread) {
2817 0 : assert(false);
2818 : return NULL;
2819 : }
2820 :
2821 0 : if (spdk_unlikely(thread->state != SPDK_THREAD_STATE_RUNNING)) {
2822 0 : SPDK_ERRLOG("thread %s is not in the running state\n", thread->name);
2823 0 : return NULL;
2824 : }
2825 :
2826 0 : intr = calloc(1, sizeof(*intr));
2827 0 : if (intr == NULL) {
2828 0 : SPDK_ERRLOG("Interrupt handler allocation failed\n");
2829 0 : return NULL;
2830 : }
2831 :
2832 0 : if (name) {
2833 0 : snprintf(intr->name, sizeof(intr->name), "%s", name);
2834 0 : } else {
2835 0 : snprintf(intr->name, sizeof(intr->name), "%p", fn);
2836 : }
2837 :
2838 0 : intr->efd = efd;
2839 0 : intr->thread = thread;
2840 0 : intr->fn = fn;
2841 0 : intr->arg = arg;
2842 :
2843 0 : ret = spdk_fd_group_add(thread->fgrp, efd, _interrupt_wrapper, intr, intr->name);
2844 :
2845 0 : if (ret != 0) {
2846 0 : SPDK_ERRLOG("thread %s: failed to add fd %d: %s\n",
2847 : thread->name, efd, spdk_strerror(-ret));
2848 0 : free(intr);
2849 0 : return NULL;
2850 : }
2851 :
2852 0 : return intr;
2853 0 : }
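     : /* Usage sketch (editor's illustration; my_fn and my_arg are hypothetical):
     : * register an eventfd so fn runs when the fd becomes readable. The
     : * SPDK_INTERRUPT_REGISTER() macro in spdk/thread.h wraps this call and
     : * uses the callback's symbol name.
     : *
     : * int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
     : * struct spdk_interrupt *intr =
     : *         spdk_interrupt_register(efd, my_fn, my_arg, "my_intr");
     : */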
2854 :
2855 : void
2856 0 : spdk_interrupt_unregister(struct spdk_interrupt **pintr)
2857 : {
2858 0 : struct spdk_thread *thread;
2859 0 : struct spdk_interrupt *intr;
2860 :
2861 0 : intr = *pintr;
2862 0 : if (intr == NULL) {
2863 0 : return;
2864 : }
2865 :
2866 0 : *pintr = NULL;
2867 :
2868 0 : thread = spdk_get_thread();
2869 0 : if (!thread) {
2870 0 : assert(false);
2871 : return;
2872 : }
2873 :
2874 0 : if (intr->thread != thread) {
2875 0 : wrong_thread(__func__, intr->name, intr->thread, thread);
2876 0 : return;
2877 : }
2878 :
2879 0 : spdk_fd_group_remove(thread->fgrp, intr->efd);
2880 0 : free(intr);
2881 0 : }
2882 :
2883 : int
2884 0 : spdk_interrupt_set_event_types(struct spdk_interrupt *intr,
2885 : enum spdk_interrupt_event_types event_types)
2886 : {
2887 0 : struct spdk_thread *thread;
2888 :
2889 0 : thread = spdk_get_thread();
2890 0 : if (!thread) {
2891 0 : assert(false);
2892 : return -EINVAL;
2893 : }
2894 :
2895 0 : if (intr->thread != thread) {
2896 0 : wrong_thread(__func__, intr->name, intr->thread, thread);
2897 0 : return -EINVAL;
2898 : }
2899 :
2900 0 : return spdk_fd_group_event_modify(thread->fgrp, intr->efd, event_types);
2901 0 : }
2902 :
2903 : int
2904 0 : spdk_thread_get_interrupt_fd(struct spdk_thread *thread)
2905 : {
2906 0 : return spdk_fd_group_get_fd(thread->fgrp);
2907 : }
2908 :
2909 : struct spdk_fd_group *
2910 0 : spdk_thread_get_interrupt_fd_group(struct spdk_thread *thread)
2911 : {
2912 0 : return thread->fgrp;
2913 : }
2914 :
2915 : static bool g_interrupt_mode = false;
2916 :
2917 : int
2918 0 : spdk_interrupt_mode_enable(void)
2919 : {
2920 : /* This must be called once, before the threading library is initialized.
2921 : * g_spdk_msg_mempool is non-NULL once the thread library has been initialized.
2922 : */
2923 0 : if (g_spdk_msg_mempool) {
2924 0 : SPDK_ERRLOG("Failed: the threading library is already initialized.\n");
2925 0 : return -1;
2926 : }
2927 :
2928 : #ifdef __linux__
2929 0 : SPDK_NOTICELOG("Set SPDK running in interrupt mode.\n");
2930 0 : g_interrupt_mode = true;
2931 0 : return 0;
2932 : #else
2933 : SPDK_ERRLOG("SPDK interrupt mode is currently supported only on Linux.\n");
2934 : g_interrupt_mode = false;
2935 : return -ENOTSUP;
2936 : #endif
2937 0 : }
2938 :
2939 : bool
2940 107822 : spdk_interrupt_mode_is_enabled(void)
2941 : {
2942 107822 : return g_interrupt_mode;
2943 : }
2944 :
2945 : #define SSPIN_DEBUG_STACK_FRAMES 16
2946 :
2947 : struct sspin_stack {
2948 : void *addrs[SSPIN_DEBUG_STACK_FRAMES];
2949 : uint32_t depth;
2950 : };
2951 :
2952 : struct spdk_spinlock_internal {
2953 : struct sspin_stack init_stack;
2954 : struct sspin_stack lock_stack;
2955 : struct sspin_stack unlock_stack;
2956 : };
2957 :
2958 : static void
2959 1185 : sspin_init_internal(struct spdk_spinlock *sspin)
2960 : {
2961 : #ifdef DEBUG
2962 1185 : sspin->internal = calloc(1, sizeof(*sspin->internal));
2963 : #endif
2964 1185 : }
2965 :
2966 : static void
2967 1175 : sspin_fini_internal(struct spdk_spinlock *sspin)
2968 : {
2969 : #ifdef DEBUG
2970 1175 : free(sspin->internal);
2971 1175 : sspin->internal = NULL;
2972 : #endif
2973 1175 : }
2974 :
2975 : #if defined(DEBUG) && defined(SPDK_HAVE_EXECINFO_H)
2976 : #define SSPIN_GET_STACK(sspin, which) \
2977 : do { \
2978 : if (sspin->internal != NULL) { \
2979 : struct sspin_stack *stack = &sspin->internal->which ## _stack; \
2980 : stack->depth = backtrace(stack->addrs, SPDK_COUNTOF(stack->addrs)); \
2981 : } \
2982 : } while (0)
2983 : #else
2984 : #define SSPIN_GET_STACK(sspin, which) do { } while (0)
2985 : #endif
2986 :
2987 : static void
2988 15 : sspin_stack_print(const char *title, const struct sspin_stack *sspin_stack)
2989 : {
2990 : #ifdef SPDK_HAVE_EXECINFO_H
2991 : char **stack;
2992 : size_t i;
2993 :
2994 : stack = backtrace_symbols(sspin_stack->addrs, sspin_stack->depth);
2995 : if (stack == NULL) {
2996 : SPDK_ERRLOG("Out of memory while allocate stack for %s\n", title);
2997 : SPDK_ERRLOG("Out of memory while allocating stack symbols for %s\n", title);
2998 : }
2999 : SPDK_ERRLOG(" %s:\n", title);
3000 : for (i = 0; i < sspin_stack->depth; i++) {
3001 : /*
3002 : * This does not print line numbers. In gdb, use something like "list *0x444b6b" or
3003 : * "list *sspin_stack->addrs[0]". Or more conveniently, load the spdk gdb macros
3004 : * and use "print *sspin" or "print sspin->internal.lock_stack". See
3005 : * gdb_macros.md in the docs directory for details.
3006 : */
3007 : SPDK_ERRLOG(" #%" PRIu64 ": %s\n", i, stack[i]);
3008 : }
3009 : free(stack);
3010 : #endif /* SPDK_HAVE_EXECINFO_H */
3011 15 : }
3012 :
3013 : static void
3014 5 : sspin_stacks_print(const struct spdk_spinlock *sspin)
3015 : {
3016 5 : if (sspin->internal == NULL) {
3017 0 : return;
3018 : }
3019 5 : SPDK_ERRLOG("spinlock %p\n", sspin);
3020 5 : sspin_stack_print("Lock initialized at", &sspin->internal->init_stack);
3021 5 : sspin_stack_print("Last locked at", &sspin->internal->lock_stack);
3022 5 : sspin_stack_print("Last unlocked at", &sspin->internal->unlock_stack);
3023 5 : }
3024 :
3025 : void
3026 1185 : spdk_spin_init(struct spdk_spinlock *sspin)
3027 : {
3028 1185 : int rc;
3029 :
3030 1185 : memset(sspin, 0, sizeof(*sspin));
3031 1185 : rc = pthread_spin_init(&sspin->spinlock, PTHREAD_PROCESS_PRIVATE);
3032 1185 : SPIN_ASSERT_LOG_STACKS(rc == 0, SPIN_ERR_PTHREAD, sspin);
3033 1185 : sspin_init_internal(sspin);
3034 1185 : SSPIN_GET_STACK(sspin, init);
3035 1185 : sspin->initialized = true;
3036 1185 : }
3037 :
3038 : void
3039 1176 : spdk_spin_destroy(struct spdk_spinlock *sspin)
3040 : {
3041 1176 : int rc;
3042 :
3043 1176 : SPIN_ASSERT_LOG_STACKS(!sspin->destroyed, SPIN_ERR_DESTROYED, sspin);
3044 1176 : SPIN_ASSERT_LOG_STACKS(sspin->initialized, SPIN_ERR_NOT_INITIALIZED, sspin);
3045 1176 : SPIN_ASSERT_LOG_STACKS(sspin->thread == NULL, SPIN_ERR_LOCK_HELD, sspin);
3046 :
3047 1175 : rc = pthread_spin_destroy(&sspin->spinlock);
3048 1175 : SPIN_ASSERT_LOG_STACKS(rc == 0, SPIN_ERR_PTHREAD, sspin);
3049 :
3050 1175 : sspin_fini_internal(sspin);
3051 1175 : sspin->initialized = false;
3052 1175 : sspin->destroyed = true;
3053 1176 : }
3054 :
3055 : void
3056 25078 : spdk_spin_lock(struct spdk_spinlock *sspin)
3057 : {
3058 25078 : struct spdk_thread *thread = spdk_get_thread();
3059 25078 : int rc;
3060 :
3061 25078 : SPIN_ASSERT_LOG_STACKS(!sspin->destroyed, SPIN_ERR_DESTROYED, sspin);
3062 25078 : SPIN_ASSERT_LOG_STACKS(sspin->initialized, SPIN_ERR_NOT_INITIALIZED, sspin);
3063 25078 : SPIN_ASSERT_LOG_STACKS(thread != NULL, SPIN_ERR_NOT_SPDK_THREAD, sspin);
3064 25077 : SPIN_ASSERT_LOG_STACKS(thread != sspin->thread, SPIN_ERR_DEADLOCK, sspin);
3065 :
3066 25076 : rc = pthread_spin_lock(&sspin->spinlock);
3067 25076 : SPIN_ASSERT_LOG_STACKS(rc == 0, SPIN_ERR_PTHREAD, sspin);
3068 :
3069 25076 : sspin->thread = thread;
3070 25076 : sspin->thread->lock_count++;
3071 :
3072 25076 : SSPIN_GET_STACK(sspin, lock);
3073 25078 : }
3074 :
3075 : void
3076 25078 : spdk_spin_unlock(struct spdk_spinlock *sspin)
3077 : {
3078 25078 : struct spdk_thread *thread = spdk_get_thread();
3079 25078 : int rc;
3080 :
3081 25078 : SPIN_ASSERT_LOG_STACKS(!sspin->destroyed, SPIN_ERR_DESTROYED, sspin);
3082 25078 : SPIN_ASSERT_LOG_STACKS(sspin->initialized, SPIN_ERR_NOT_INITIALIZED, sspin);
3083 25078 : SPIN_ASSERT_LOG_STACKS(thread != NULL, SPIN_ERR_NOT_SPDK_THREAD, sspin);
3084 25078 : SPIN_ASSERT_LOG_STACKS(thread == sspin->thread, SPIN_ERR_WRONG_THREAD, sspin);
3085 :
3086 25076 : SPIN_ASSERT_LOG_STACKS(thread->lock_count > 0, SPIN_ERR_LOCK_COUNT, sspin);
3087 25076 : thread->lock_count--;
3088 25076 : sspin->thread = NULL;
3089 :
3090 25076 : SSPIN_GET_STACK(sspin, unlock);
3091 :
3092 25076 : rc = pthread_spin_unlock(&sspin->spinlock);
3093 25076 : SPIN_ASSERT_LOG_STACKS(rc == 0, SPIN_ERR_PTHREAD, sspin);
3094 25078 : }
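     : /* Editor's sketch (not in the original source): unlike plain pthread
     : * spinlocks, these assert against recursion, cross-thread unlock, and
     : * unlocking without holding, and record backtraces in DEBUG builds.
     : *
     : * struct spdk_spinlock lock;
     : * spdk_spin_init(&lock);
     : * spdk_spin_lock(&lock);
     : * assert(spdk_spin_held(&lock));
     : * spdk_spin_unlock(&lock);
     : * spdk_spin_destroy(&lock);
     : */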
3095 :
3096 : bool
3097 29692 : spdk_spin_held(struct spdk_spinlock *sspin)
3098 : {
3099 29692 : struct spdk_thread *thread = spdk_get_thread();
3100 :
3101 29692 : SPIN_ASSERT_RETURN(thread != NULL, SPIN_ERR_NOT_SPDK_THREAD, false);
3102 :
3103 29691 : return sspin->thread == thread;
3104 29692 : }
3105 :
3106 41 : SPDK_LOG_REGISTER_COMPONENT(thread)