/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Intel Corporation.
 * Copyright (c) 2021 Mellanox Technologies LTD.
 * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "nvme_internal.h"

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->accel_fn_table.table_size = sizeof(struct spdk_nvme_accel_fn_table);
	if (table && table->table_size != 0) {
		group->accel_fn_table.table_size = table->table_size;
#define SET_FIELD(field) \
		if (offsetof(struct spdk_nvme_accel_fn_table, field) + sizeof(table->field) <= table->table_size) { \
			group->accel_fn_table.field = table->field; \
		} \

		SET_FIELD(append_crc32c);
		SET_FIELD(append_copy);
		SET_FIELD(finish_sequence);
		SET_FIELD(reverse_sequence);
		SET_FIELD(abort_sequence);
		/* Do not remove this assertion. Update the expected size whenever a new
		 * field is added, and add a matching SET_FIELD() line for that field. */
		SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_accel_fn_table) == 56, "Incorrect size");

#undef SET_FIELD
	}

	/* Make sure either all or none of the sequence manipulation callbacks are implemented */
	if ((group->accel_fn_table.finish_sequence && group->accel_fn_table.reverse_sequence &&
	     group->accel_fn_table.abort_sequence) !=
	    (group->accel_fn_table.finish_sequence || group->accel_fn_table.reverse_sequence ||
	     group->accel_fn_table.abort_sequence)) {
		SPDK_ERRLOG("Invalid accel_fn_table configuration: either all or none of the "
			    "sequence callbacks must be provided\n");
		free(group);
		return NULL;
	}

	/* Make sure that sequence callbacks are implemented if append* callbacks are provided */
	if ((group->accel_fn_table.append_crc32c || group->accel_fn_table.append_copy) &&
	    !group->accel_fn_table.finish_sequence) {
		SPDK_ERRLOG("Invalid accel_fn_table configuration: append_crc32c and/or append_copy require sequence "
			    "callbacks to be provided\n");
		free(group);
		return NULL;
	}

	group->ctx = ctx;
	STAILQ_INIT(&group->tgroups);

	return group;
}
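
/*
 * Usage sketch (illustrative, not part of this file): create a poll group
 * without custom accel callbacks. `app_ctx` is a hypothetical application
 * pointer, retrievable later via spdk_nvme_poll_group_get_ctx().
 *
 *	struct spdk_nvme_poll_group *pg;
 *
 *	pg = spdk_nvme_poll_group_create(app_ctx, NULL);
 *	if (pg == NULL) {
 *		return -ENOMEM;
 *	}
 */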

struct spdk_nvme_poll_group *
spdk_nvme_qpair_get_optimal_poll_group(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;

	tgroup = nvme_transport_qpair_get_optimal_poll_group(qpair->transport, qpair);

	if (tgroup == NULL) {
		return NULL;
	}

	return tgroup->group;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	const struct spdk_nvme_transport *transport;

	/* Only disconnected qpairs may be added to a poll group. */
	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISCONNECTED) {
		return -EINVAL;
	}

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			break;
		}
	}

	/* See if a new transport has been added (dlopen style) and we need to update the poll group */
	if (!tgroup) {
		transport = nvme_get_first_transport();
		while (transport != NULL) {
			if (transport == qpair->transport) {
				tgroup = nvme_transport_poll_group_create(transport);
				if (tgroup == NULL) {
					return -ENOMEM;
				}
				tgroup->group = group;
				STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
				break;
			}
			transport = nvme_get_next_transport(transport);
		}
	}

	return tgroup ? nvme_transport_poll_group_add(tgroup, qpair) : -ENODEV;
}
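
/*
 * Usage sketch (illustrative): a qpair must still be disconnected when it is
 * added, so create it with `create_only` set and connect it afterwards. `pg`
 * and `ctrlr` are hypothetical caller-owned objects.
 *
 *	struct spdk_nvme_io_qpair_opts opts;
 *	struct spdk_nvme_qpair *qpair;
 *
 *	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
 *	opts.create_only = true;
 *	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
 *	if (qpair != NULL && spdk_nvme_poll_group_add(pg, qpair) == 0) {
 *		spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
 *	}
 */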

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			return nvme_transport_poll_group_remove(tgroup, qpair);
		}
	}

	return -ENODEV;
}

int
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	return nvme_transport_poll_group_connect_qpair(qpair);
}

int
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	return nvme_transport_poll_group_disconnect_qpair(qpair);
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	/* Guard against reentrant calls, e.g. from a completion callback that
	 * polls the same group. */
	if (spdk_unlikely(group->in_process_completions)) {
		return 0;
	}
	group->in_process_completions = true;

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		local_completions = nvme_transport_poll_group_process_completions(tgroup, completions_per_qpair,
				    disconnected_qpair_cb);
		if (local_completions < 0 && error_reason == 0) {
			/* Remember only the first error encountered. */
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			/* Just to be safe */
			assert(num_completions >= 0);
		}
	}
	group->in_process_completions = false;

	return error_reason ? error_reason : num_completions;
}
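
/*
 * Usage sketch (illustrative): a typical poller loop body. The disconnect
 * callback is mandatory; it fires for each disconnected qpair seen while
 * polling and commonly frees the qpair. A negative return is the first error
 * reported by any transport group; 0 for completions_per_qpair requests the
 * default behavior (no explicit per-qpair cap).
 *
 *	static void
 *	app_disconnected_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
 *	{
 *		spdk_nvme_ctrlr_free_io_qpair(qpair);
 *	}
 *
 *	int64_t rc;
 *
 *	rc = spdk_nvme_poll_group_process_completions(pg, 0, app_disconnected_cb);
 */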

int
spdk_nvme_poll_group_all_connected(struct spdk_nvme_poll_group *group)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	struct spdk_nvme_qpair *qpair;
	int rc = 0;

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (!STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
			/* Treat disconnected qpairs as highest priority for notification.
			 * This means we can just return immediately here.
			 */
			return -EIO;
		}
		STAILQ_FOREACH(qpair, &tgroup->connected_qpairs, poll_group_stailq) {
			if (nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTING) {
				return -EIO;
			} else if (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
				rc = -EAGAIN;
				/* Break so that we can check the remaining transport groups,
				 * in case any of them have a disconnected qpair.
				 */
				break;
			}
		}
	}

	return rc;
}
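
/*
 * Usage sketch (illustrative): after initiating asynchronous connects, poll
 * until the whole group is connected. 0 means all qpairs are connected,
 * -EAGAIN means at least one is still connecting, -EIO means one failed or
 * disconnected. `app_disconnected_cb` is the hypothetical callback above.
 *
 *	int rc;
 *
 *	do {
 *		spdk_nvme_poll_group_process_completions(pg, 0, app_disconnected_cb);
 *		rc = spdk_nvme_poll_group_all_connected(pg);
 *	} while (rc == -EAGAIN);
 */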

void *
spdk_nvme_poll_group_get_ctx(struct spdk_nvme_poll_group *group)
{
	return group->ctx;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	struct spdk_nvme_transport_poll_group *tgroup, *tmp_tgroup;

	STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
		STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
		if (nvme_transport_poll_group_destroy(tgroup) != 0) {
			/* Put the transport group back so the caller can retry. */
			STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
			return -EBUSY;
		}
	}

	free(group);

	return 0;
}
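
/*
 * Usage sketch (illustrative): destruction fails with -EBUSY while any
 * transport group still holds qpairs, so remove or free every qpair first
 * and retry on failure.
 *
 *	if (spdk_nvme_poll_group_destroy(pg) == -EBUSY) {
 *		drain qpairs, then call spdk_nvme_poll_group_destroy() again
 *	}
 */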

int
spdk_nvme_poll_group_get_stats(struct spdk_nvme_poll_group *group,
			       struct spdk_nvme_poll_group_stat **stats)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	struct spdk_nvme_poll_group_stat *result;
	uint32_t transports_count = 0;
	/* Not all transports used by this poll group may support statistics reporting */
	uint32_t reported_stats_count = 0;
	int rc;

	assert(group);
	assert(stats);

	result = calloc(1, sizeof(*result));
	if (!result) {
		SPDK_ERRLOG("Failed to allocate memory for poll group statistics\n");
		return -ENOMEM;
	}

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		transports_count++;
	}

	result->transport_stat = calloc(transports_count, sizeof(*result->transport_stat));
	if (!result->transport_stat) {
		SPDK_ERRLOG("Failed to allocate memory for poll group statistics\n");
		free(result);
		return -ENOMEM;
	}

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		rc = nvme_transport_poll_group_get_stats(tgroup, &result->transport_stat[reported_stats_count]);
		if (rc == 0) {
			reported_stats_count++;
		}
	}

	if (reported_stats_count == 0) {
		free(result->transport_stat);
		free(result);
		SPDK_DEBUGLOG(nvme, "No transport statistics available\n");
		return -ENOTSUP;
	}

	result->num_transports = reported_stats_count;
	*stats = result;

	return 0;
}

void
spdk_nvme_poll_group_free_stats(struct spdk_nvme_poll_group *group,
				struct spdk_nvme_poll_group_stat *stat)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	uint32_t i;
	uint32_t freed_stats __attribute__((unused)) = 0;

	assert(group);
	assert(stat);

	for (i = 0; i < stat->num_transports; i++) {
		STAILQ_FOREACH(tgroup, &group->tgroups, link) {
			if (nvme_transport_get_trtype(tgroup->transport) == stat->transport_stat[i]->trtype) {
				nvme_transport_poll_group_free_stats(tgroup, stat->transport_stat[i]);
				freed_stats++;
				break;
			}
		}
	}

	assert(freed_stats == stat->num_transports);

	free(stat->transport_stat);
	free(stat);
}
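
/*
 * Usage sketch (illustrative): statistics are allocated by the get call and
 * must be returned with the matching free call on the same poll group.
 * `consume()` is a placeholder for application-side reporting.
 *
 *	struct spdk_nvme_poll_group_stat *stats;
 *	uint32_t i;
 *
 *	if (spdk_nvme_poll_group_get_stats(pg, &stats) == 0) {
 *		for (i = 0; i < stats->num_transports; i++) {
 *			consume(stats->transport_stat[i]);
 *		}
 *		spdk_nvme_poll_group_free_stats(pg, stats);
 *	}
 */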