Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2018 Intel Corporation. All rights reserved.
3 : * Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
4 : * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "spdk/accel.h"
8 : #include "spdk/stdinc.h"
9 : #include "spdk/crc32.h"
10 : #include "spdk/endian.h"
11 : #include "spdk/assert.h"
12 : #include "spdk/thread.h"
13 : #include "spdk/nvmf_transport.h"
14 : #include "spdk/string.h"
15 : #include "spdk/trace.h"
16 : #include "spdk/util.h"
17 : #include "spdk/log.h"
18 : #include "spdk/keyring.h"
19 :
20 : #include "spdk_internal/assert.h"
21 : #include "spdk_internal/nvme_tcp.h"
22 : #include "spdk_internal/sock.h"
23 :
24 : #include "nvmf_internal.h"
25 :
26 : #include "spdk_internal/trace_defs.h"
27 :
28 : #define NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME 16
29 : #define SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY 16
30 : #define SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY 0
31 : #define SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM 32
32 : #define SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION true
33 :
34 : #define SPDK_NVMF_TCP_MIN_IO_QUEUE_DEPTH 2
35 : #define SPDK_NVMF_TCP_MAX_IO_QUEUE_DEPTH 65535
36 : #define SPDK_NVMF_TCP_MIN_ADMIN_QUEUE_DEPTH 2
37 : #define SPDK_NVMF_TCP_MAX_ADMIN_QUEUE_DEPTH 4096
38 :
39 : #define SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH 128
40 : #define SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH 128
41 : #define SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR 128
42 : #define SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
43 : #define SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE 131072
44 : #define SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE 131072
45 : #define SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS 511
46 : #define SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE UINT32_MAX
47 : #define SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP false
48 : #define SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC 1
49 :
50 : #define TCP_PSK_INVALID_PERMISSIONS 0177
51 :
52 : const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp;
53 : static bool g_tls_log = false;
54 :
55 : /* SPDK NVMe-oF TCP request state machine */
56 : enum spdk_nvmf_tcp_req_state {
57 :
58 : /* The request is not currently in use */
59 : TCP_REQUEST_STATE_FREE = 0,
60 :
61 : /* Initial state when the request is first received */
62 : TCP_REQUEST_STATE_NEW = 1,
63 :
64 : /* The request is queued until a data buffer is available. */
65 : TCP_REQUEST_STATE_NEED_BUFFER = 2,
66 :
67 : /* The request is waiting for zcopy_start to finish */
68 : TCP_REQUEST_STATE_AWAITING_ZCOPY_START = 3,
69 :
70 : /* The request has received a zero-copy buffer */
71 : TCP_REQUEST_STATE_ZCOPY_START_COMPLETED = 4,
72 :
73 : /* The request is currently transferring data from the host to the controller. */
74 : TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER = 5,
75 :
76 : /* The request is waiting for the R2T send acknowledgement. */
77 : TCP_REQUEST_STATE_AWAITING_R2T_ACK = 6,
78 :
79 : /* The request is ready to execute at the block device */
80 : TCP_REQUEST_STATE_READY_TO_EXECUTE = 7,
81 :
82 : /* The request is currently executing at the block device */
83 : TCP_REQUEST_STATE_EXECUTING = 8,
84 :
85 : /* The request is waiting for zcopy buffers to be committed */
86 : TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT = 9,
87 :
88 : /* The request finished executing at the block device */
89 : TCP_REQUEST_STATE_EXECUTED = 10,
90 :
91 : /* The request is ready to send a completion */
92 : TCP_REQUEST_STATE_READY_TO_COMPLETE = 11,
93 :
94 : /* The request is currently transferring final pdus from the controller to the host. */
95 : TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST = 12,
96 :
97 : /* The request is waiting for zcopy buffers to be released (without committing) */
98 : TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE = 13,
99 :
100 : /* The request completed and can be marked free. */
101 : TCP_REQUEST_STATE_COMPLETED = 14,
102 :
103 : /* Terminator */
104 : TCP_REQUEST_NUM_STATES,
105 : };
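/*
 * Editorial sketch (not upstream code): a typical non-zcopy request moves
 * roughly FREE -> NEW -> NEED_BUFFER -> [AWAITING_R2T_ACK ->
 * TRANSFERRING_HOST_TO_CONTROLLER, for H2C data] -> READY_TO_EXECUTE ->
 * EXECUTING -> EXECUTED -> READY_TO_COMPLETE ->
 * TRANSFERRING_CONTROLLER_TO_HOST -> COMPLETED -> FREE. The zcopy states
 * are entered only when the underlying bdev supports zero-copy.
 */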
106 :
107 : static const char *spdk_nvmf_tcp_term_req_fes_str[] = {
108 : "Invalid PDU Header Field",
109 : "PDU Sequence Error",
110 : "Header Digiest Error",
111 : "Data Transfer Out of Range",
112 : "R2T Limit Exceeded",
113 : "Unsupported parameter",
114 : };
115 :
116 1 : SPDK_TRACE_REGISTER_FN(nvmf_tcp_trace, "nvmf_tcp", TRACE_GROUP_NVMF_TCP)
117 : {
118 0 : spdk_trace_register_owner_type(OWNER_TYPE_NVMF_TCP, 't');
119 0 : spdk_trace_register_object(OBJECT_NVMF_TCP_IO, 'r');
120 0 : spdk_trace_register_description("TCP_REQ_NEW",
121 : TRACE_TCP_REQUEST_STATE_NEW,
122 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 1,
123 : SPDK_TRACE_ARG_TYPE_INT, "qd");
124 0 : spdk_trace_register_description("TCP_REQ_NEED_BUFFER",
125 : TRACE_TCP_REQUEST_STATE_NEED_BUFFER,
126 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
127 : SPDK_TRACE_ARG_TYPE_INT, "");
128 0 : spdk_trace_register_description("TCP_REQ_WAIT_ZCPY_START",
129 : TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_START,
130 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
131 : SPDK_TRACE_ARG_TYPE_INT, "");
132 0 : spdk_trace_register_description("TCP_REQ_ZCPY_START_CPL",
133 : TRACE_TCP_REQUEST_STATE_ZCOPY_START_COMPLETED,
134 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
135 : SPDK_TRACE_ARG_TYPE_INT, "");
136 0 : spdk_trace_register_description("TCP_REQ_TX_H_TO_C",
137 : TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
138 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
139 : SPDK_TRACE_ARG_TYPE_INT, "");
140 0 : spdk_trace_register_description("TCP_REQ_RDY_TO_EXECUTE",
141 : TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE,
142 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
143 : SPDK_TRACE_ARG_TYPE_INT, "");
144 0 : spdk_trace_register_description("TCP_REQ_EXECUTING",
145 : TRACE_TCP_REQUEST_STATE_EXECUTING,
146 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
147 : SPDK_TRACE_ARG_TYPE_INT, "");
148 0 : spdk_trace_register_description("TCP_REQ_WAIT_ZCPY_CMT",
149 : TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_COMMIT,
150 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
151 : SPDK_TRACE_ARG_TYPE_INT, "");
152 0 : spdk_trace_register_description("TCP_REQ_EXECUTED",
153 : TRACE_TCP_REQUEST_STATE_EXECUTED,
154 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
155 : SPDK_TRACE_ARG_TYPE_INT, "");
156 0 : spdk_trace_register_description("TCP_REQ_RDY_TO_COMPLETE",
157 : TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE,
158 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
159 : SPDK_TRACE_ARG_TYPE_INT, "");
160 0 : spdk_trace_register_description("TCP_REQ_TRANSFER_C2H",
161 : TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
162 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
163 : SPDK_TRACE_ARG_TYPE_INT, "");
164 0 : spdk_trace_register_description("TCP_REQ_AWAIT_ZCPY_RLS",
165 : TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_RELEASE,
166 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
167 : SPDK_TRACE_ARG_TYPE_INT, "");
168 0 : spdk_trace_register_description("TCP_REQ_COMPLETED",
169 : TRACE_TCP_REQUEST_STATE_COMPLETED,
170 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
171 : SPDK_TRACE_ARG_TYPE_INT, "qd");
172 0 : spdk_trace_register_description("TCP_READ_DONE",
173 : TRACE_TCP_READ_FROM_SOCKET_DONE,
174 : OWNER_TYPE_NVMF_TCP, OBJECT_NONE, 0,
175 : SPDK_TRACE_ARG_TYPE_INT, "");
176 0 : spdk_trace_register_description("TCP_REQ_AWAIT_R2T_ACK",
177 : TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK,
178 : OWNER_TYPE_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0,
179 : SPDK_TRACE_ARG_TYPE_INT, "");
180 :
181 0 : spdk_trace_register_description("TCP_QP_CREATE", TRACE_TCP_QP_CREATE,
182 : OWNER_TYPE_NVMF_TCP, OBJECT_NONE, 0,
183 : SPDK_TRACE_ARG_TYPE_INT, "");
184 0 : spdk_trace_register_description("TCP_QP_SOCK_INIT", TRACE_TCP_QP_SOCK_INIT,
185 : OWNER_TYPE_NVMF_TCP, OBJECT_NONE, 0,
186 : SPDK_TRACE_ARG_TYPE_INT, "");
187 0 : spdk_trace_register_description("TCP_QP_STATE_CHANGE", TRACE_TCP_QP_STATE_CHANGE,
188 : OWNER_TYPE_NVMF_TCP, OBJECT_NONE, 0,
189 : SPDK_TRACE_ARG_TYPE_INT, "state");
190 0 : spdk_trace_register_description("TCP_QP_DISCONNECT", TRACE_TCP_QP_DISCONNECT,
191 : OWNER_TYPE_NVMF_TCP, OBJECT_NONE, 0,
192 : SPDK_TRACE_ARG_TYPE_INT, "");
193 0 : spdk_trace_register_description("TCP_QP_DESTROY", TRACE_TCP_QP_DESTROY,
194 : OWNER_TYPE_NVMF_TCP, OBJECT_NONE, 0,
195 : SPDK_TRACE_ARG_TYPE_INT, "");
196 0 : spdk_trace_register_description("TCP_QP_ABORT_REQ", TRACE_TCP_QP_ABORT_REQ,
197 : OWNER_TYPE_NVMF_TCP, OBJECT_NONE, 0,
198 : SPDK_TRACE_ARG_TYPE_INT, "");
199 0 : spdk_trace_register_description("TCP_QP_RCV_STATE_CHANGE", TRACE_TCP_QP_RCV_STATE_CHANGE,
200 : OWNER_TYPE_NVMF_TCP, OBJECT_NONE, 0,
201 : SPDK_TRACE_ARG_TYPE_INT, "state");
202 :
203 0 : spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_START, OBJECT_NVMF_TCP_IO, 1);
204 0 : spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_DONE, OBJECT_NVMF_TCP_IO, 0);
205 0 : spdk_trace_tpoint_register_relation(TRACE_SOCK_REQ_QUEUE, OBJECT_NVMF_TCP_IO, 0);
206 0 : spdk_trace_tpoint_register_relation(TRACE_SOCK_REQ_PEND, OBJECT_NVMF_TCP_IO, 0);
207 0 : spdk_trace_tpoint_register_relation(TRACE_SOCK_REQ_COMPLETE, OBJECT_NVMF_TCP_IO, 0);
208 0 : }
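/*
 * Editorial note: only TCP_REQ_NEW and TCP_REQ_COMPLETED register a "qd"
 * trace argument, so those two tracepoints bracket a request's lifetime
 * with the qpair queue depth; the QP state-change tracepoints record the
 * new state value instead.
 */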
209 :
210 : struct spdk_nvmf_tcp_req {
211 : struct spdk_nvmf_request req;
212 : struct spdk_nvme_cpl rsp;
213 : struct spdk_nvme_cmd cmd;
214 :
215 : /* A PDU that can be used for sending responses. This is
216 : * not the incoming PDU! */
217 : struct nvme_tcp_pdu *pdu;
218 :
219 : /* In-capsule data buffer */
220 : uint8_t *buf;
221 :
222 : struct spdk_nvmf_tcp_req *fused_pair;
223 :
224 : /*
225 : * The PDU for a request may be used multiple times in serial over
226 : * the request's lifetime. For example, first to send an R2T, then
227 : * to send a completion. To catch mistakes where the PDU is used
228 : * twice at the same time, add a debug flag here for init/fini.
229 : */
230 : bool pdu_in_use;
231 : bool has_in_capsule_data;
232 : bool fused_failed;
233 :
234 : /* transfer_tag */
235 : uint16_t ttag;
236 :
237 : enum spdk_nvmf_tcp_req_state state;
238 :
239 : /*
240 : * h2c_offset is used when we receive the h2c_data PDU.
241 : */
242 : uint32_t h2c_offset;
243 :
244 : STAILQ_ENTRY(spdk_nvmf_tcp_req) link;
245 : TAILQ_ENTRY(spdk_nvmf_tcp_req) state_link;
246 : };
247 :
248 : struct spdk_nvmf_tcp_qpair {
249 : struct spdk_nvmf_qpair qpair;
250 : struct spdk_nvmf_tcp_poll_group *group;
251 : struct spdk_sock *sock;
252 :
253 : enum nvme_tcp_pdu_recv_state recv_state;
254 : enum nvme_tcp_qpair_state state;
255 :
256 : /* PDU being actively received */
257 : struct nvme_tcp_pdu *pdu_in_progress;
258 :
259 : struct spdk_nvmf_tcp_req *fused_first;
260 :
261 : /* Queues to track the requests in all states */
262 : TAILQ_HEAD(, spdk_nvmf_tcp_req) tcp_req_working_queue;
263 : TAILQ_HEAD(, spdk_nvmf_tcp_req) tcp_req_free_queue;
264 : SLIST_HEAD(, nvme_tcp_pdu) tcp_pdu_free_queue;
265 : /* Number of working pdus */
266 : uint32_t tcp_pdu_working_count;
267 :
268 : /* Number of requests in each state */
269 : uint32_t state_cntr[TCP_REQUEST_NUM_STATES];
270 :
271 : uint8_t cpda;
272 :
273 : bool host_hdgst_enable;
274 : bool host_ddgst_enable;
275 :
276 : /* This is a spare PDU used for sending special management
277 : * operations. Primarily, this is used for the initial
278 : * connection response and c2h termination request. */
279 : struct nvme_tcp_pdu *mgmt_pdu;
280 :
281 : /* Arrays of in-capsule buffers, requests, and pdus.
282 : * Each array is 'resource_count' number of elements */
283 : void *bufs;
284 : struct spdk_nvmf_tcp_req *reqs;
285 : struct nvme_tcp_pdu *pdus;
286 : uint32_t resource_count;
287 : uint32_t recv_buf_size;
288 :
289 : struct spdk_nvmf_tcp_port *port;
290 :
291 : /* IP address */
292 : char initiator_addr[SPDK_NVMF_TRADDR_MAX_LEN];
293 : char target_addr[SPDK_NVMF_TRADDR_MAX_LEN];
294 :
295 : /* IP port */
296 : uint16_t initiator_port;
297 : uint16_t target_port;
298 :
299 : /* Wait until the host terminates the connection (e.g. after sending C2HTermReq) */
300 : bool wait_terminate;
301 :
302 : /* Timer used to destroy qpair after detecting transport error issue if initiator does
303 : * not close the connection.
304 : */
305 : struct spdk_poller *timeout_poller;
306 :
307 : spdk_nvmf_transport_qpair_fini_cb fini_cb_fn;
308 : void *fini_cb_arg;
309 :
310 : TAILQ_ENTRY(spdk_nvmf_tcp_qpair) link;
311 : };
312 :
313 : struct spdk_nvmf_tcp_control_msg {
314 : STAILQ_ENTRY(spdk_nvmf_tcp_control_msg) link;
315 : };
316 :
317 : struct spdk_nvmf_tcp_control_msg_list {
318 : void *msg_buf;
319 : STAILQ_HEAD(, spdk_nvmf_tcp_control_msg) free_msgs;
320 : };
321 :
322 : struct spdk_nvmf_tcp_poll_group {
323 : struct spdk_nvmf_transport_poll_group group;
324 : struct spdk_sock_group *sock_group;
325 :
326 : TAILQ_HEAD(, spdk_nvmf_tcp_qpair) qpairs;
327 : TAILQ_HEAD(, spdk_nvmf_tcp_qpair) await_req;
328 :
329 : struct spdk_io_channel *accel_channel;
330 : struct spdk_nvmf_tcp_control_msg_list *control_msg_list;
331 :
332 : TAILQ_ENTRY(spdk_nvmf_tcp_poll_group) link;
333 : };
334 :
335 : struct spdk_nvmf_tcp_port {
336 : const struct spdk_nvme_transport_id *trid;
337 : struct spdk_sock *listen_sock;
338 : TAILQ_ENTRY(spdk_nvmf_tcp_port) link;
339 : };
340 :
341 : struct tcp_transport_opts {
342 : bool c2h_success;
343 : uint16_t control_msg_num;
344 : uint32_t sock_priority;
345 : };
346 :
347 : struct tcp_psk_entry {
348 : char hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
349 : char subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
350 : char pskid[NVMF_PSK_IDENTITY_LEN];
351 : uint8_t psk[SPDK_TLS_PSK_MAX_LEN];
352 : struct spdk_key *key;
353 :
354 : /* Original path saved to emit SPDK configuration via "save_config". */
355 : char psk_path[PATH_MAX];
356 : uint32_t psk_size;
357 : enum nvme_tcp_cipher_suite tls_cipher_suite;
358 : TAILQ_ENTRY(tcp_psk_entry) link;
359 : };
360 :
361 : struct spdk_nvmf_tcp_transport {
362 : struct spdk_nvmf_transport transport;
363 : struct tcp_transport_opts tcp_opts;
364 : uint32_t ack_timeout;
365 :
366 : struct spdk_nvmf_tcp_poll_group *next_pg;
367 :
368 : struct spdk_poller *accept_poller;
369 :
370 : TAILQ_HEAD(, spdk_nvmf_tcp_port) ports;
371 : TAILQ_HEAD(, spdk_nvmf_tcp_poll_group) poll_groups;
372 :
373 : TAILQ_HEAD(, tcp_psk_entry) psks;
374 : };
375 :
376 : static const struct spdk_json_object_decoder tcp_transport_opts_decoder[] = {
377 : {
378 : "c2h_success", offsetof(struct tcp_transport_opts, c2h_success),
379 : spdk_json_decode_bool, true
380 : },
381 : {
382 : "control_msg_num", offsetof(struct tcp_transport_opts, control_msg_num),
383 : spdk_json_decode_uint16, true
384 : },
385 : {
386 : "sock_priority", offsetof(struct tcp_transport_opts, sock_priority),
387 : spdk_json_decode_uint32, true
388 : },
389 : };
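/*
 * Hedged example (field names taken from the decoder above; the RPC method
 * name is an assumption): transport-specific JSON accepted when the
 * transport is created, e.g. through the nvmf_create_transport RPC:
 *
 *   { "c2h_success": true, "control_msg_num": 32, "sock_priority": 0 }
 *
 * Each decoder entry is marked optional, so omitted keys keep the defaults
 * assigned in nvmf_tcp_create().
 */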
390 :
391 : static bool nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
392 : struct spdk_nvmf_tcp_req *tcp_req);
393 : static void nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group);
394 :
395 : static void _nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
396 : struct spdk_nvmf_tcp_req *tcp_req);
397 :
398 : static inline void
399 7 : nvmf_tcp_req_set_state(struct spdk_nvmf_tcp_req *tcp_req,
400 : enum spdk_nvmf_tcp_req_state state)
401 : {
402 : struct spdk_nvmf_qpair *qpair;
403 : struct spdk_nvmf_tcp_qpair *tqpair;
404 :
405 7 : qpair = tcp_req->req.qpair;
406 7 : tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
407 :
408 7 : assert(tqpair->state_cntr[tcp_req->state] > 0);
409 7 : tqpair->state_cntr[tcp_req->state]--;
410 7 : tqpair->state_cntr[state]++;
411 :
412 7 : tcp_req->state = state;
413 7 : }
414 :
415 : static inline struct nvme_tcp_pdu *
416 7 : nvmf_tcp_req_pdu_init(struct spdk_nvmf_tcp_req *tcp_req)
417 : {
418 7 : assert(tcp_req->pdu_in_use == false);
419 :
420 7 : memset(tcp_req->pdu, 0, sizeof(*tcp_req->pdu));
421 7 : tcp_req->pdu->qpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
422 :
423 7 : return tcp_req->pdu;
424 : }
425 :
426 : static struct spdk_nvmf_tcp_req *
427 1 : nvmf_tcp_req_get(struct spdk_nvmf_tcp_qpair *tqpair)
428 : {
429 : struct spdk_nvmf_tcp_req *tcp_req;
430 :
431 1 : tcp_req = TAILQ_FIRST(&tqpair->tcp_req_free_queue);
432 1 : if (spdk_unlikely(!tcp_req)) {
433 0 : return NULL;
434 : }
435 :
436 1 : memset(&tcp_req->rsp, 0, sizeof(tcp_req->rsp));
437 1 : tcp_req->h2c_offset = 0;
438 1 : tcp_req->has_in_capsule_data = false;
439 1 : tcp_req->req.dif_enabled = false;
440 1 : tcp_req->req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
441 :
442 1 : TAILQ_REMOVE(&tqpair->tcp_req_free_queue, tcp_req, state_link);
443 1 : TAILQ_INSERT_TAIL(&tqpair->tcp_req_working_queue, tcp_req, state_link);
444 1 : tqpair->qpair.queue_depth++;
445 1 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEW);
446 1 : return tcp_req;
447 : }
448 :
449 : static inline void
450 0 : nvmf_tcp_req_put(struct spdk_nvmf_tcp_qpair *tqpair, struct spdk_nvmf_tcp_req *tcp_req)
451 : {
452 0 : assert(!tcp_req->pdu_in_use);
453 :
454 0 : TAILQ_REMOVE(&tqpair->tcp_req_working_queue, tcp_req, state_link);
455 0 : TAILQ_INSERT_TAIL(&tqpair->tcp_req_free_queue, tcp_req, state_link);
456 0 : tqpair->qpair.queue_depth--;
457 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_FREE);
458 0 : }
459 :
460 : static void
461 0 : nvmf_tcp_request_free(void *cb_arg)
462 : {
463 : struct spdk_nvmf_tcp_transport *ttransport;
464 0 : struct spdk_nvmf_tcp_req *tcp_req = cb_arg;
465 :
466 0 : assert(tcp_req != NULL);
467 :
468 0 : SPDK_DEBUGLOG(nvmf_tcp, "tcp_req=%p will be freed\n", tcp_req);
469 0 : ttransport = SPDK_CONTAINEROF(tcp_req->req.qpair->transport,
470 : struct spdk_nvmf_tcp_transport, transport);
471 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED);
472 0 : nvmf_tcp_req_process(ttransport, tcp_req);
473 0 : }
474 :
475 : static int
476 0 : nvmf_tcp_req_free(struct spdk_nvmf_request *req)
477 : {
478 0 : struct spdk_nvmf_tcp_req *tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req);
479 :
480 0 : nvmf_tcp_request_free(tcp_req);
481 :
482 0 : return 0;
483 : }
484 :
485 : static void
486 6 : nvmf_tcp_drain_state_queue(struct spdk_nvmf_tcp_qpair *tqpair,
487 : enum spdk_nvmf_tcp_req_state state)
488 : {
489 : struct spdk_nvmf_tcp_req *tcp_req, *req_tmp;
490 :
491 6 : assert(state != TCP_REQUEST_STATE_FREE);
492 6 : TAILQ_FOREACH_SAFE(tcp_req, &tqpair->tcp_req_working_queue, state_link, req_tmp) {
493 0 : if (state == tcp_req->state) {
494 0 : nvmf_tcp_request_free(tcp_req);
495 : }
496 : }
497 6 : }
498 :
499 : static void
500 1 : nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair)
501 : {
502 : struct spdk_nvmf_tcp_req *tcp_req, *req_tmp;
503 :
504 1 : nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
505 1 : nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEW);
506 :
507 : /* Wipe the requests waiting for buffer from the global list */
508 1 : TAILQ_FOREACH_SAFE(tcp_req, &tqpair->tcp_req_working_queue, state_link, req_tmp) {
509 0 : if (tcp_req->state == TCP_REQUEST_STATE_NEED_BUFFER) {
510 0 : STAILQ_REMOVE(&tqpair->group->group.pending_buf_queue, &tcp_req->req,
511 : spdk_nvmf_request, buf_link);
512 : }
513 : }
514 :
515 1 : nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEED_BUFFER);
516 1 : nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_EXECUTING);
517 1 : nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
518 1 : nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_AWAITING_R2T_ACK);
519 1 : }
520 :
521 : static void
522 0 : nvmf_tcp_dump_qpair_req_contents(struct spdk_nvmf_tcp_qpair *tqpair)
523 : {
524 : int i;
525 : struct spdk_nvmf_tcp_req *tcp_req;
526 :
527 0 : SPDK_ERRLOG("Dumping contents of queue pair (QID %d)\n", tqpair->qpair.qid);
528 0 : for (i = 1; i < TCP_REQUEST_NUM_STATES; i++) {
529 0 : SPDK_ERRLOG("\tNum of requests in state[%d] = %u\n", i, tqpair->state_cntr[i]);
530 0 : TAILQ_FOREACH(tcp_req, &tqpair->tcp_req_working_queue, state_link) {
531 0 : if ((int)tcp_req->state == i) {
532 0 : SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", tcp_req->req.data_from_pool);
533 0 : SPDK_ERRLOG("\t\tRequest opcode: %d\n", tcp_req->req.cmd->nvmf_cmd.opcode);
534 : }
535 : }
536 : }
537 0 : }
538 :
539 : static void
540 1 : _nvmf_tcp_qpair_destroy(void *_tqpair)
541 : {
542 1 : struct spdk_nvmf_tcp_qpair *tqpair = _tqpair;
543 1 : spdk_nvmf_transport_qpair_fini_cb cb_fn = tqpair->fini_cb_fn;
544 1 : void *cb_arg = tqpair->fini_cb_arg;
545 1 : int err = 0;
546 :
547 1 : spdk_trace_record(TRACE_TCP_QP_DESTROY, tqpair->qpair.trace_id, 0, 0);
548 :
549 1 : SPDK_DEBUGLOG(nvmf_tcp, "enter\n");
550 :
551 1 : err = spdk_sock_close(&tqpair->sock);
552 1 : assert(err == 0);
553 1 : nvmf_tcp_cleanup_all_states(tqpair);
554 :
555 1 : if (tqpair->state_cntr[TCP_REQUEST_STATE_FREE] != tqpair->resource_count) {
556 0 : SPDK_ERRLOG("tqpair(%p) free tcp request num is %u but should be %u\n", tqpair,
557 : tqpair->state_cntr[TCP_REQUEST_STATE_FREE],
558 : tqpair->resource_count);
559 0 : err++;
560 : }
561 :
562 1 : if (err > 0) {
563 0 : nvmf_tcp_dump_qpair_req_contents(tqpair);
564 : }
565 :
566 : /* The timeout poller might still be registered here if we close the qpair before host
567 : * terminates the connection.
568 : */
569 1 : spdk_poller_unregister(&tqpair->timeout_poller);
570 1 : spdk_dma_free(tqpair->pdus);
571 1 : free(tqpair->reqs);
572 1 : spdk_free(tqpair->bufs);
573 1 : spdk_trace_unregister_owner(tqpair->qpair.trace_id);
574 1 : free(tqpair);
575 :
576 1 : if (cb_fn != NULL) {
577 0 : cb_fn(cb_arg);
578 : }
579 :
580 1 : SPDK_DEBUGLOG(nvmf_tcp, "Leave\n");
581 1 : }
582 :
583 : static void
584 1 : nvmf_tcp_qpair_destroy(struct spdk_nvmf_tcp_qpair *tqpair)
585 : {
586 : /* Delay the destruction to make sure it isn't performed from the context of a sock
587 : * callback. Otherwise, spdk_sock_close() might not abort pending requests, causing their
588 : * completions to be executed after the qpair is freed. (Note: this fixed issue #2471.)
589 : */
590 1 : spdk_thread_send_msg(spdk_get_thread(), _nvmf_tcp_qpair_destroy, tqpair);
591 1 : }
592 :
593 : static void
594 0 : nvmf_tcp_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w)
595 : {
596 : struct spdk_nvmf_tcp_transport *ttransport;
597 0 : assert(w != NULL);
598 :
599 0 : ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
600 0 : spdk_json_write_named_bool(w, "c2h_success", ttransport->tcp_opts.c2h_success);
601 0 : spdk_json_write_named_uint32(w, "sock_priority", ttransport->tcp_opts.sock_priority);
602 0 : }
603 :
604 : static void
605 1 : nvmf_tcp_free_psk_entry(struct tcp_psk_entry *entry)
606 : {
607 1 : if (entry == NULL) {
608 0 : return;
609 : }
610 :
611 1 : spdk_memset_s(entry->psk, sizeof(entry->psk), 0, sizeof(entry->psk));
612 1 : spdk_keyring_put_key(entry->key);
613 1 : free(entry);
614 : }
615 :
616 : static int
617 5 : nvmf_tcp_destroy(struct spdk_nvmf_transport *transport,
618 : spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
619 : {
620 : struct spdk_nvmf_tcp_transport *ttransport;
621 : struct tcp_psk_entry *entry, *tmp;
622 :
623 5 : assert(transport != NULL);
624 5 : ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
625 :
626 5 : TAILQ_FOREACH_SAFE(entry, &ttransport->psks, link, tmp) {
627 0 : TAILQ_REMOVE(&ttransport->psks, entry, link);
628 0 : nvmf_tcp_free_psk_entry(entry);
629 : }
630 :
631 5 : spdk_poller_unregister(&ttransport->accept_poller);
632 5 : free(ttransport);
633 :
634 5 : if (cb_fn) {
635 0 : cb_fn(cb_arg);
636 : }
637 5 : return 0;
638 : }
639 :
640 : static int nvmf_tcp_accept(void *ctx);
641 :
642 : static struct spdk_nvmf_transport *
643 6 : nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
644 : {
645 : struct spdk_nvmf_tcp_transport *ttransport;
646 : uint32_t sge_count;
647 : uint32_t min_shared_buffers;
648 :
649 6 : ttransport = calloc(1, sizeof(*ttransport));
650 6 : if (!ttransport) {
651 0 : return NULL;
652 : }
653 :
654 6 : TAILQ_INIT(&ttransport->ports);
655 6 : TAILQ_INIT(&ttransport->poll_groups);
656 6 : TAILQ_INIT(&ttransport->psks);
657 :
658 6 : ttransport->transport.ops = &spdk_nvmf_transport_tcp;
659 :
660 6 : ttransport->tcp_opts.c2h_success = SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION;
661 6 : ttransport->tcp_opts.sock_priority = SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY;
662 6 : ttransport->tcp_opts.control_msg_num = SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM;
663 6 : if (opts->transport_specific != NULL &&
664 0 : spdk_json_decode_object_relaxed(opts->transport_specific, tcp_transport_opts_decoder,
665 : SPDK_COUNTOF(tcp_transport_opts_decoder),
666 0 : &ttransport->tcp_opts)) {
667 0 : SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n");
668 0 : free(ttransport);
669 0 : return NULL;
670 : }
671 :
672 6 : SPDK_NOTICELOG("*** TCP Transport Init ***\n");
673 :
674 6 : SPDK_INFOLOG(nvmf_tcp, "*** TCP Transport Init ***\n"
675 : " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n"
676 : " max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
677 : " in_capsule_data_size=%d, max_aq_depth=%d\n"
678 : " num_shared_buffers=%d, c2h_success=%d,\n"
679 : " dif_insert_or_strip=%d, sock_priority=%d\n"
680 : " abort_timeout_sec=%d, control_msg_num=%hu\n"
681 : " ack_timeout=%d\n",
682 : opts->max_queue_depth,
683 : opts->max_io_size,
684 : opts->max_qpairs_per_ctrlr - 1,
685 : opts->io_unit_size,
686 : opts->in_capsule_data_size,
687 : opts->max_aq_depth,
688 : opts->num_shared_buffers,
689 : ttransport->tcp_opts.c2h_success,
690 : opts->dif_insert_or_strip,
691 : ttransport->tcp_opts.sock_priority,
692 : opts->abort_timeout_sec,
693 : ttransport->tcp_opts.control_msg_num,
694 : opts->ack_timeout);
695 :
696 6 : if (ttransport->tcp_opts.sock_priority > SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY) {
697 0 : SPDK_ERRLOG("Unsupported socket_priority=%d, the current range is: 0 to %d\n"
698 : "you can use man 7 socket to view the range of priority under SO_PRIORITY item\n",
699 : ttransport->tcp_opts.sock_priority, SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY);
700 0 : free(ttransport);
701 0 : return NULL;
702 : }
703 :
704 6 : if (ttransport->tcp_opts.control_msg_num == 0 &&
705 0 : opts->in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
706 0 : SPDK_WARNLOG("TCP param control_msg_num can't be 0 if ICD is less than %u bytes. Using default value %u\n",
707 : SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE, SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM);
708 0 : ttransport->tcp_opts.control_msg_num = SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM;
709 : }
710 :
711 : /* I/O unit size cannot be larger than max I/O size */
712 6 : if (opts->io_unit_size > opts->max_io_size) {
713 1 : SPDK_WARNLOG("TCP param io_unit_size %u can't be larger than max_io_size %u. Using max_io_size as io_unit_size\n",
714 : opts->io_unit_size, opts->max_io_size);
715 1 : opts->io_unit_size = opts->max_io_size;
716 : }
717 :
718 : /* In capsule data size cannot be larger than max I/O size */
719 6 : if (opts->in_capsule_data_size > opts->max_io_size) {
720 0 : SPDK_WARNLOG("TCP param ICD size %u can't be larger than max_io_size %u. Using max_io_size as ICD size\n",
721 : opts->in_capsule_data_size, opts->max_io_size);
722 0 : opts->in_capsule_data_size = opts->max_io_size;
723 : }
724 :
725 : /* max IO queue depth cannot be smaller than 2 or larger than 65535.
726 : * SPDK_NVMF_TCP_MAX_IO_QUEUE_DEPTH is not checked here, because max_queue_depth is 16 bits and can never exceed 64k. */
727 6 : if (opts->max_queue_depth < SPDK_NVMF_TCP_MIN_IO_QUEUE_DEPTH) {
728 0 : SPDK_WARNLOG("TCP param max_queue_depth %u can't be smaller than %u or larger than %u. Using default value %u\n",
729 : opts->max_queue_depth, SPDK_NVMF_TCP_MIN_IO_QUEUE_DEPTH,
730 : SPDK_NVMF_TCP_MAX_IO_QUEUE_DEPTH, SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
731 0 : opts->max_queue_depth = SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH;
732 : }
733 :
734 : /* max admin queue depth cannot be smaller than 2 or larger than 4096 */
735 6 : if (opts->max_aq_depth < SPDK_NVMF_TCP_MIN_ADMIN_QUEUE_DEPTH ||
736 6 : opts->max_aq_depth > SPDK_NVMF_TCP_MAX_ADMIN_QUEUE_DEPTH) {
737 0 : SPDK_WARNLOG("TCP param max_aq_depth %u can't be smaller than %u or larger than %u. Using default value %u\n",
738 : opts->max_aq_depth, SPDK_NVMF_TCP_MIN_ADMIN_QUEUE_DEPTH,
739 : SPDK_NVMF_TCP_MAX_ADMIN_QUEUE_DEPTH, SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
740 0 : opts->max_aq_depth = SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH;
741 : }
742 :
743 6 : sge_count = opts->max_io_size / opts->io_unit_size;
744 6 : if (sge_count > SPDK_NVMF_MAX_SGL_ENTRIES) {
745 1 : SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
746 1 : free(ttransport);
747 1 : return NULL;
748 : }
749 :
750 : /* If buf_cache_size == UINT32_MAX, we will dynamically pick a cache size later that we know will fit. */
751 5 : if (opts->buf_cache_size < UINT32_MAX) {
752 5 : min_shared_buffers = spdk_env_get_core_count() * opts->buf_cache_size;
753 5 : if (min_shared_buffers > opts->num_shared_buffers) {
754 0 : SPDK_ERRLOG("There are not enough buffers to satisfy "
755 : "per-poll group caches for each thread. (%" PRIu32 ") "
756 : "supplied. (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers);
757 0 : SPDK_ERRLOG("Please specify a larger number of shared buffers\n");
758 0 : free(ttransport);
759 0 : return NULL;
760 : }
761 : }
762 :
763 5 : ttransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_tcp_accept, &ttransport->transport,
764 : opts->acceptor_poll_rate);
765 5 : if (!ttransport->accept_poller) {
766 0 : free(ttransport);
767 0 : return NULL;
768 : }
769 :
770 5 : return &ttransport->transport;
771 : }
772 :
773 : static int
774 0 : nvmf_tcp_trsvcid_to_int(const char *trsvcid)
775 : {
776 : unsigned long long ull;
777 0 : char *end = NULL;
778 :
779 0 : ull = strtoull(trsvcid, &end, 10);
780 0 : if (end == NULL || end == trsvcid || *end != '\0') {
781 0 : return -1;
782 : }
783 :
784 : /* Valid TCP/IP port numbers are in [1, 65535] */
785 0 : if (ull == 0 || ull > 65535) {
786 0 : return -1;
787 : }
788 :
789 0 : return (int)ull;
790 : }
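/*
 * Editorial examples: nvmf_tcp_trsvcid_to_int("4420") == 4420, while "0",
 * "65536", "44x0" and "" all return -1 (the service id must parse fully as
 * a decimal number and fall in [1, 65535]).
 */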
791 :
792 : /**
793 : * Canonicalize a listen address trid.
794 : */
795 : static int
796 0 : nvmf_tcp_canon_listen_trid(struct spdk_nvme_transport_id *canon_trid,
797 : const struct spdk_nvme_transport_id *trid)
798 : {
799 : int trsvcid_int;
800 :
801 0 : trsvcid_int = nvmf_tcp_trsvcid_to_int(trid->trsvcid);
802 0 : if (trsvcid_int < 0) {
803 0 : return -EINVAL;
804 : }
805 :
806 0 : memset(canon_trid, 0, sizeof(*canon_trid));
807 0 : spdk_nvme_trid_populate_transport(canon_trid, SPDK_NVME_TRANSPORT_TCP);
808 0 : canon_trid->adrfam = trid->adrfam;
809 0 : snprintf(canon_trid->traddr, sizeof(canon_trid->traddr), "%s", trid->traddr);
810 0 : snprintf(canon_trid->trsvcid, sizeof(canon_trid->trsvcid), "%d", trsvcid_int);
811 :
812 0 : return 0;
813 : }
814 :
815 : /**
816 : * Find an existing listening port.
817 : */
818 : static struct spdk_nvmf_tcp_port *
819 0 : nvmf_tcp_find_port(struct spdk_nvmf_tcp_transport *ttransport,
820 : const struct spdk_nvme_transport_id *trid)
821 : {
822 0 : struct spdk_nvme_transport_id canon_trid;
823 : struct spdk_nvmf_tcp_port *port;
824 :
825 0 : if (nvmf_tcp_canon_listen_trid(&canon_trid, trid) != 0) {
826 0 : return NULL;
827 : }
828 :
829 0 : TAILQ_FOREACH(port, &ttransport->ports, link) {
830 0 : if (spdk_nvme_transport_id_compare(&canon_trid, port->trid) == 0) {
831 0 : return port;
832 : }
833 : }
834 :
835 0 : return NULL;
836 : }
837 :
838 : static int
839 0 : tcp_sock_get_key(uint8_t *out, int out_len, const char **cipher, const char *pskid,
840 : void *get_key_ctx)
841 : {
842 : struct tcp_psk_entry *entry;
843 0 : struct spdk_nvmf_tcp_transport *ttransport = get_key_ctx;
844 : size_t psk_len;
845 : int rc;
846 :
847 0 : TAILQ_FOREACH(entry, &ttransport->psks, link) {
848 0 : if (strcmp(pskid, entry->pskid) != 0) {
849 0 : continue;
850 : }
851 :
852 0 : psk_len = entry->psk_size;
853 0 : if ((size_t)out_len < psk_len) {
854 0 : SPDK_ERRLOG("Out buffer of size: %" PRIu32 " cannot fit PSK of len: %lu\n",
855 : out_len, psk_len);
856 0 : return -ENOBUFS;
857 : }
858 :
859 : /* Convert PSK to the TLS PSK format. */
860 0 : rc = nvme_tcp_derive_tls_psk(entry->psk, psk_len, pskid, out, out_len,
861 : entry->tls_cipher_suite);
862 0 : if (rc < 0) {
863 0 : SPDK_ERRLOG("Could not generate TLS PSK\n");
864 : }
865 :
866 0 : switch (entry->tls_cipher_suite) {
867 0 : case NVME_TCP_CIPHER_AES_128_GCM_SHA256:
868 0 : *cipher = "TLS_AES_128_GCM_SHA256";
869 0 : break;
870 0 : case NVME_TCP_CIPHER_AES_256_GCM_SHA384:
871 0 : *cipher = "TLS_AES_256_GCM_SHA384";
872 0 : break;
873 0 : default:
874 0 : *cipher = NULL;
875 0 : return -ENOTSUP;
876 : }
877 :
878 0 : return rc;
879 : }
880 :
881 0 : SPDK_ERRLOG("Could not find PSK for identity: %s\n", pskid);
882 :
883 0 : return -EINVAL;
884 : }
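/*
 * Editorial note: this callback is installed as impl_opts.get_key in
 * nvmf_tcp_listen() below, so the "ssl" sock implementation invokes it with
 * the PSK identity offered by the host and receives the derived TLS PSK
 * plus the matching cipher-suite string in return.
 */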
885 :
886 : static int
887 0 : nvmf_tcp_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid,
888 : struct spdk_nvmf_listen_opts *listen_opts)
889 : {
890 : struct spdk_nvmf_tcp_transport *ttransport;
891 : struct spdk_nvmf_tcp_port *port;
892 : int trsvcid_int;
893 : uint8_t adrfam;
894 : const char *sock_impl_name;
895 0 : struct spdk_sock_impl_opts impl_opts;
896 0 : size_t impl_opts_size = sizeof(impl_opts);
897 0 : struct spdk_sock_opts opts;
898 :
899 0 : if (!strlen(trid->trsvcid)) {
900 0 : SPDK_ERRLOG("Service id is required\n");
901 0 : return -EINVAL;
902 : }
903 :
904 0 : ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
905 :
906 0 : trsvcid_int = nvmf_tcp_trsvcid_to_int(trid->trsvcid);
907 0 : if (trsvcid_int < 0) {
908 0 : SPDK_ERRLOG("Invalid trsvcid '%s'\n", trid->trsvcid);
909 0 : return -EINVAL;
910 : }
911 :
912 0 : port = calloc(1, sizeof(*port));
913 0 : if (!port) {
914 0 : SPDK_ERRLOG("Port allocation failed\n");
915 0 : return -ENOMEM;
916 : }
917 :
918 0 : port->trid = trid;
919 :
920 0 : sock_impl_name = NULL;
921 :
922 0 : opts.opts_size = sizeof(opts);
923 0 : spdk_sock_get_default_opts(&opts);
924 0 : opts.priority = ttransport->tcp_opts.sock_priority;
925 0 : opts.ack_timeout = transport->opts.ack_timeout;
926 0 : if (listen_opts->secure_channel) {
927 0 : if (!g_tls_log) {
928 0 : SPDK_NOTICELOG("TLS support is considered experimental\n");
929 0 : g_tls_log = true;
930 : }
931 0 : sock_impl_name = "ssl";
932 0 : spdk_sock_impl_get_opts(sock_impl_name, &impl_opts, &impl_opts_size);
933 0 : impl_opts.tls_version = SPDK_TLS_VERSION_1_3;
934 0 : impl_opts.get_key = tcp_sock_get_key;
935 0 : impl_opts.get_key_ctx = ttransport;
936 0 : impl_opts.tls_cipher_suites = "TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256";
937 0 : opts.impl_opts = &impl_opts;
938 0 : opts.impl_opts_size = sizeof(impl_opts);
939 : }
940 :
941 0 : port->listen_sock = spdk_sock_listen_ext(trid->traddr, trsvcid_int,
942 : sock_impl_name, &opts);
943 0 : if (port->listen_sock == NULL) {
944 0 : SPDK_ERRLOG("spdk_sock_listen(%s, %d) failed: %s (%d)\n",
945 : trid->traddr, trsvcid_int,
946 : spdk_strerror(errno), errno);
947 0 : free(port);
948 0 : return -errno;
949 : }
950 :
951 0 : if (spdk_sock_is_ipv4(port->listen_sock)) {
952 0 : adrfam = SPDK_NVMF_ADRFAM_IPV4;
953 0 : } else if (spdk_sock_is_ipv6(port->listen_sock)) {
954 0 : adrfam = SPDK_NVMF_ADRFAM_IPV6;
955 : } else {
956 0 : SPDK_ERRLOG("Unhandled socket type\n");
957 0 : adrfam = 0;
958 : }
959 :
960 0 : if (adrfam != trid->adrfam) {
961 0 : SPDK_ERRLOG("Socket address family mismatch\n");
962 0 : spdk_sock_close(&port->listen_sock);
963 0 : free(port);
964 0 : return -EINVAL;
965 : }
966 :
967 0 : SPDK_NOTICELOG("*** NVMe/TCP Target Listening on %s port %s ***\n",
968 : trid->traddr, trid->trsvcid);
969 :
970 0 : TAILQ_INSERT_TAIL(&ttransport->ports, port, link);
971 0 : return 0;
972 : }
973 :
974 : static void
975 0 : nvmf_tcp_stop_listen(struct spdk_nvmf_transport *transport,
976 : const struct spdk_nvme_transport_id *trid)
977 : {
978 : struct spdk_nvmf_tcp_transport *ttransport;
979 : struct spdk_nvmf_tcp_port *port;
980 :
981 0 : ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
982 :
983 0 : SPDK_DEBUGLOG(nvmf_tcp, "Removing listen address %s port %s\n",
984 : trid->traddr, trid->trsvcid);
985 :
986 0 : port = nvmf_tcp_find_port(ttransport, trid);
987 0 : if (port) {
988 0 : TAILQ_REMOVE(&ttransport->ports, port, link);
989 0 : spdk_sock_close(&port->listen_sock);
990 0 : free(port);
991 : }
992 0 : }
993 :
994 : static void nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair,
995 : enum nvme_tcp_pdu_recv_state state);
996 :
997 : static void
998 1 : nvmf_tcp_qpair_set_state(struct spdk_nvmf_tcp_qpair *tqpair, enum nvme_tcp_qpair_state state)
999 : {
1000 1 : tqpair->state = state;
1001 1 : spdk_trace_record(TRACE_TCP_QP_STATE_CHANGE, tqpair->qpair.trace_id, 0, 0,
1002 : (uint64_t)tqpair->state);
1003 1 : }
1004 :
1005 : static void
1006 0 : nvmf_tcp_qpair_disconnect(struct spdk_nvmf_tcp_qpair *tqpair)
1007 : {
1008 0 : SPDK_DEBUGLOG(nvmf_tcp, "Disconnecting qpair %p\n", tqpair);
1009 :
1010 0 : spdk_trace_record(TRACE_TCP_QP_DISCONNECT, tqpair->qpair.trace_id, 0, 0);
1011 :
1012 0 : if (tqpair->state <= NVME_TCP_QPAIR_STATE_RUNNING) {
1013 0 : nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_EXITING);
1014 0 : assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
1015 0 : spdk_poller_unregister(&tqpair->timeout_poller);
1016 :
1017 : /* This will end up calling nvmf_tcp_close_qpair */
1018 0 : spdk_nvmf_qpair_disconnect(&tqpair->qpair);
1019 : }
1020 0 : }
1021 :
1022 : static void
1023 16 : _mgmt_pdu_write_done(void *_tqpair, int err)
1024 : {
1025 16 : struct spdk_nvmf_tcp_qpair *tqpair = _tqpair;
1026 16 : struct nvme_tcp_pdu *pdu = tqpair->mgmt_pdu;
1027 :
1028 16 : if (spdk_unlikely(err != 0)) {
1029 16 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
1030 16 : return;
1031 : }
1032 :
1033 0 : assert(pdu->cb_fn != NULL);
1034 0 : pdu->cb_fn(pdu->cb_arg);
1035 : }
1036 :
1037 : static void
1038 0 : _req_pdu_write_done(void *req, int err)
1039 : {
1040 0 : struct spdk_nvmf_tcp_req *tcp_req = req;
1041 0 : struct nvme_tcp_pdu *pdu = tcp_req->pdu;
1042 0 : struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair;
1043 :
1044 0 : assert(tcp_req->pdu_in_use);
1045 0 : tcp_req->pdu_in_use = false;
1046 :
1047 : /* If the request is in a completed state, we're waiting for write completion to free it */
1048 0 : if (spdk_unlikely(tcp_req->state == TCP_REQUEST_STATE_COMPLETED)) {
1049 0 : nvmf_tcp_request_free(tcp_req);
1050 0 : return;
1051 : }
1052 :
1053 0 : if (spdk_unlikely(err != 0)) {
1054 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
1055 0 : return;
1056 : }
1057 :
1058 0 : assert(pdu->cb_fn != NULL);
1059 0 : pdu->cb_fn(pdu->cb_arg);
1060 : }
1061 :
1062 : static void
1063 16 : _pdu_write_done(struct nvme_tcp_pdu *pdu, int err)
1064 : {
1065 16 : pdu->sock_req.cb_fn(pdu->sock_req.cb_arg, err);
1066 16 : }
1067 :
1068 : static void
1069 23 : _tcp_write_pdu(struct nvme_tcp_pdu *pdu)
1070 : {
1071 : int rc;
1072 23 : uint32_t mapped_length;
1073 23 : struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair;
1074 :
1075 46 : pdu->sock_req.iovcnt = nvme_tcp_build_iovs(pdu->iov, SPDK_COUNTOF(pdu->iov), pdu,
1076 23 : tqpair->host_hdgst_enable, tqpair->host_ddgst_enable, &mapped_length);
1077 23 : spdk_sock_writev_async(tqpair->sock, &pdu->sock_req);
1078 :
1079 23 : if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP ||
1080 22 : pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ) {
1081 : /* Try to force the send immediately. */
1082 16 : rc = spdk_sock_flush(tqpair->sock);
1083 16 : if (rc > 0 && (uint32_t)rc == mapped_length) {
1084 0 : _pdu_write_done(pdu, 0);
1085 : } else {
1086 16 : SPDK_ERRLOG("Could not write %s to socket: rc=%d, errno=%d\n",
1087 : pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP ?
1088 : "IC_RESP" : "TERM_REQ", rc, errno);
1089 16 : _pdu_write_done(pdu, rc >= 0 ? -EAGAIN : -errno);
1090 : }
1091 : }
1092 23 : }
1093 :
1094 : static void
1095 0 : data_crc32_accel_done(void *cb_arg, int status)
1096 : {
1097 0 : struct nvme_tcp_pdu *pdu = cb_arg;
1098 :
1099 0 : if (spdk_unlikely(status)) {
1100 0 : SPDK_ERRLOG("Failed to compute the data digest for pdu =%p\n", pdu);
1101 0 : _pdu_write_done(pdu, status);
1102 0 : return;
1103 : }
1104 :
1105 0 : pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
1106 0 : MAKE_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);
1107 :
1108 0 : _tcp_write_pdu(pdu);
1109 : }
1110 :
1111 : static void
1112 23 : pdu_data_crc32_compute(struct nvme_tcp_pdu *pdu)
1113 : {
1114 23 : struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair;
1115 23 : int rc = 0;
1116 :
1117 : /* Data Digest */
1118 23 : if (pdu->data_len > 0 && g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] && tqpair->host_ddgst_enable) {
1119 : /* Only support this limitated case for the first step */
1120 0 : if (spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0)
1121 : && tqpair->group)) {
1122 0 : rc = spdk_accel_submit_crc32cv(tqpair->group->accel_channel, &pdu->data_digest_crc32, pdu->data_iov,
1123 : pdu->data_iovcnt, 0, data_crc32_accel_done, pdu);
1124 0 : if (spdk_likely(rc == 0)) {
1125 0 : return;
1126 : }
1127 : } else {
1128 0 : pdu->data_digest_crc32 = nvme_tcp_pdu_calc_data_digest(pdu);
1129 : }
1130 0 : data_crc32_accel_done(pdu, rc);
1131 : } else {
1132 23 : _tcp_write_pdu(pdu);
1133 : }
1134 : }
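/*
 * Editorial note: the NVMe/TCP data digest is a CRC32C with a final bitwise
 * NOT; data_crc32_accel_done() applies it as crc ^= SPDK_CRC32C_XOR before
 * MAKE_DIGEST_WORD() serializes the digest into the PDU, so the accel
 * engine only needs to produce the raw CRC32C.
 */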
1135 :
1136 : static void
1137 23 : nvmf_tcp_qpair_write_pdu(struct spdk_nvmf_tcp_qpair *tqpair,
1138 : struct nvme_tcp_pdu *pdu,
1139 : nvme_tcp_qpair_xfer_complete_cb cb_fn,
1140 : void *cb_arg)
1141 : {
1142 : int hlen;
1143 : uint32_t crc32c;
1144 :
1145 23 : assert(tqpair->pdu_in_progress != pdu);
1146 :
1147 23 : hlen = pdu->hdr.common.hlen;
1148 23 : pdu->cb_fn = cb_fn;
1149 23 : pdu->cb_arg = cb_arg;
1150 :
1151 23 : pdu->iov[0].iov_base = &pdu->hdr.raw;
1152 23 : pdu->iov[0].iov_len = hlen;
1153 :
1154 : /* Header Digest */
1155 23 : if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && tqpair->host_hdgst_enable) {
1156 1 : crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
1157 1 : MAKE_DIGEST_WORD((uint8_t *)pdu->hdr.raw + hlen, crc32c);
1158 : }
1159 :
1160 : /* Data Digest */
1161 23 : pdu_data_crc32_compute(pdu);
1162 23 : }
1163 :
1164 : static void
1165 16 : nvmf_tcp_qpair_write_mgmt_pdu(struct spdk_nvmf_tcp_qpair *tqpair,
1166 : nvme_tcp_qpair_xfer_complete_cb cb_fn,
1167 : void *cb_arg)
1168 : {
1169 16 : struct nvme_tcp_pdu *pdu = tqpair->mgmt_pdu;
1170 :
1171 16 : pdu->sock_req.cb_fn = _mgmt_pdu_write_done;
1172 16 : pdu->sock_req.cb_arg = tqpair;
1173 :
1174 16 : nvmf_tcp_qpair_write_pdu(tqpair, pdu, cb_fn, cb_arg);
1175 16 : }
1176 :
1177 : static void
1178 7 : nvmf_tcp_qpair_write_req_pdu(struct spdk_nvmf_tcp_qpair *tqpair,
1179 : struct spdk_nvmf_tcp_req *tcp_req,
1180 : nvme_tcp_qpair_xfer_complete_cb cb_fn,
1181 : void *cb_arg)
1182 : {
1183 7 : struct nvme_tcp_pdu *pdu = tcp_req->pdu;
1184 :
1185 7 : pdu->sock_req.cb_fn = _req_pdu_write_done;
1186 7 : pdu->sock_req.cb_arg = tcp_req;
1187 :
1188 7 : assert(!tcp_req->pdu_in_use);
1189 7 : tcp_req->pdu_in_use = true;
1190 :
1191 7 : nvmf_tcp_qpair_write_pdu(tqpair, pdu, cb_fn, cb_arg);
1192 7 : }
1193 :
1194 : static int
1195 1 : nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair)
1196 : {
1197 : uint32_t i;
1198 : struct spdk_nvmf_transport_opts *opts;
1199 : uint32_t in_capsule_data_size;
1200 :
1201 1 : opts = &tqpair->qpair.transport->opts;
1202 :
1203 1 : in_capsule_data_size = opts->in_capsule_data_size;
1204 1 : if (opts->dif_insert_or_strip) {
1205 0 : in_capsule_data_size = SPDK_BDEV_BUF_SIZE_WITH_MD(in_capsule_data_size);
1206 : }
1207 :
1208 1 : tqpair->resource_count = opts->max_queue_depth;
1209 :
1210 1 : tqpair->reqs = calloc(tqpair->resource_count, sizeof(*tqpair->reqs));
1211 1 : if (!tqpair->reqs) {
1212 0 : SPDK_ERRLOG("Unable to allocate reqs on tqpair=%p\n", tqpair);
1213 0 : return -1;
1214 : }
1215 :
1216 1 : if (in_capsule_data_size) {
1217 1 : tqpair->bufs = spdk_zmalloc(tqpair->resource_count * in_capsule_data_size, 0x1000,
1218 : NULL, SPDK_ENV_LCORE_ID_ANY,
1219 : SPDK_MALLOC_DMA);
1220 1 : if (!tqpair->bufs) {
1221 0 : SPDK_ERRLOG("Unable to allocate bufs on tqpair=%p.\n", tqpair);
1222 0 : return -1;
1223 : }
1224 : }
1225 : /* Prepare memory space for receiving PDUs and tcp_reqs. */
1226 : /* Allocate 1 additional element, used for the mgmt_pdu owned by the tqpair. */
1227 1 : tqpair->pdus = spdk_dma_zmalloc((2 * tqpair->resource_count + 1) * sizeof(*tqpair->pdus), 0x1000,
1228 : NULL);
1229 1 : if (!tqpair->pdus) {
1230 0 : SPDK_ERRLOG("Unable to allocate pdu pool on tqpair =%p.\n", tqpair);
1231 0 : return -1;
1232 : }
1233 :
1234 129 : for (i = 0; i < tqpair->resource_count; i++) {
1235 128 : struct spdk_nvmf_tcp_req *tcp_req = &tqpair->reqs[i];
1236 :
1237 128 : tcp_req->ttag = i + 1;
1238 128 : tcp_req->req.qpair = &tqpair->qpair;
1239 :
1240 128 : tcp_req->pdu = &tqpair->pdus[i];
1241 128 : tcp_req->pdu->qpair = tqpair;
1242 :
1243 : /* Set up memory to receive commands */
1244 128 : if (tqpair->bufs) {
1245 128 : tcp_req->buf = (void *)((uintptr_t)tqpair->bufs + (i * in_capsule_data_size));
1246 : }
1247 :
1248 : /* Set the cmd and rsp */
1249 128 : tcp_req->req.rsp = (union nvmf_c2h_msg *)&tcp_req->rsp;
1250 128 : tcp_req->req.cmd = (union nvmf_h2c_msg *)&tcp_req->cmd;
1251 :
1252 128 : tcp_req->req.stripped_data = NULL;
1253 :
1254 : /* Initialize request state to FREE */
1255 128 : tcp_req->state = TCP_REQUEST_STATE_FREE;
1256 128 : TAILQ_INSERT_TAIL(&tqpair->tcp_req_free_queue, tcp_req, state_link);
1257 128 : tqpair->state_cntr[TCP_REQUEST_STATE_FREE]++;
1258 : }
1259 :
1260 129 : for (; i < 2 * tqpair->resource_count; i++) {
1261 128 : struct nvme_tcp_pdu *pdu = &tqpair->pdus[i];
1262 :
1263 128 : pdu->qpair = tqpair;
1264 128 : SLIST_INSERT_HEAD(&tqpair->tcp_pdu_free_queue, pdu, slist);
1265 : }
1266 :
1267 1 : tqpair->mgmt_pdu = &tqpair->pdus[i];
1268 1 : tqpair->mgmt_pdu->qpair = tqpair;
1269 1 : tqpair->pdu_in_progress = SLIST_FIRST(&tqpair->tcp_pdu_free_queue);
1270 1 : SLIST_REMOVE_HEAD(&tqpair->tcp_pdu_free_queue, slist);
1271 1 : tqpair->tcp_pdu_working_count = 1;
1272 :
1273 1 : tqpair->recv_buf_size = (in_capsule_data_size + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
1274 : SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR;
1275 :
1276 1 : return 0;
1277 : }
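/*
 * Editorial sketch of the PDU pool laid out above (2 * resource_count + 1
 * elements, N = resource_count): pdus[0..N-1] are response PDUs paired 1:1
 * with reqs[], pdus[N..2N-1] seed tcp_pdu_free_queue for receive processing
 * (one is immediately popped as pdu_in_progress), and pdus[2N] is the
 * qpair's mgmt_pdu.
 */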
1278 :
1279 : static int
1280 1 : nvmf_tcp_qpair_init(struct spdk_nvmf_qpair *qpair)
1281 : {
1282 : struct spdk_nvmf_tcp_qpair *tqpair;
1283 :
1284 1 : tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
1285 :
1286 1 : SPDK_DEBUGLOG(nvmf_tcp, "New TCP Connection: %p\n", qpair);
1287 :
1288 1 : spdk_trace_record(TRACE_TCP_QP_CREATE, tqpair->qpair.trace_id, 0, 0);
1289 :
1290 : /* Initialise request state queues of the qpair */
1291 1 : TAILQ_INIT(&tqpair->tcp_req_free_queue);
1292 1 : TAILQ_INIT(&tqpair->tcp_req_working_queue);
1293 1 : SLIST_INIT(&tqpair->tcp_pdu_free_queue);
1294 1 : tqpair->qpair.queue_depth = 0;
1295 :
1296 1 : tqpair->host_hdgst_enable = true;
1297 1 : tqpair->host_ddgst_enable = true;
1298 :
1299 1 : return 0;
1300 : }
1301 :
1302 : static int
1303 0 : nvmf_tcp_qpair_sock_init(struct spdk_nvmf_tcp_qpair *tqpair)
1304 : {
1305 0 : char saddr[32], caddr[32];
1306 0 : uint16_t sport, cport;
1307 0 : char owner[256];
1308 : int rc;
1309 :
1310 0 : rc = spdk_sock_getaddr(tqpair->sock, saddr, sizeof(saddr), &sport,
1311 : caddr, sizeof(caddr), &cport);
1312 0 : if (rc != 0) {
1313 0 : SPDK_ERRLOG("spdk_sock_getaddr() failed\n");
1314 0 : return rc;
1315 : }
1316 0 : snprintf(owner, sizeof(owner), "%s:%d", caddr, cport);
1317 0 : tqpair->qpair.trace_id = spdk_trace_register_owner(OWNER_TYPE_NVMF_TCP, owner);
1318 0 : spdk_trace_record(TRACE_TCP_QP_SOCK_INIT, tqpair->qpair.trace_id, 0, 0);
1319 :
1320 : /* set low water mark */
1321 0 : rc = spdk_sock_set_recvlowat(tqpair->sock, 1);
1322 0 : if (rc != 0) {
1323 0 : SPDK_ERRLOG("spdk_sock_set_recvlowat() failed\n");
1324 0 : return rc;
1325 : }
1326 :
1327 0 : return 0;
1328 : }
1329 :
1330 : static void
1331 0 : nvmf_tcp_handle_connect(struct spdk_nvmf_transport *transport,
1332 : struct spdk_nvmf_tcp_port *port,
1333 : struct spdk_sock *sock)
1334 : {
1335 : struct spdk_nvmf_tcp_qpair *tqpair;
1336 : int rc;
1337 :
1338 0 : SPDK_DEBUGLOG(nvmf_tcp, "New connection accepted on %s port %s\n",
1339 : port->trid->traddr, port->trid->trsvcid);
1340 :
1341 0 : tqpair = calloc(1, sizeof(struct spdk_nvmf_tcp_qpair));
1342 0 : if (tqpair == NULL) {
1343 0 : SPDK_ERRLOG("Could not allocate new connection.\n");
1344 0 : spdk_sock_close(&sock);
1345 0 : return;
1346 : }
1347 :
1348 0 : tqpair->sock = sock;
1349 0 : tqpair->state_cntr[TCP_REQUEST_STATE_FREE] = 0;
1350 0 : tqpair->port = port;
1351 0 : tqpair->qpair.transport = transport;
1352 :
1353 0 : rc = spdk_sock_getaddr(tqpair->sock, tqpair->target_addr,
1354 : sizeof(tqpair->target_addr), &tqpair->target_port,
1355 0 : tqpair->initiator_addr, sizeof(tqpair->initiator_addr),
1356 : &tqpair->initiator_port);
1357 0 : if (rc < 0) {
1358 0 : SPDK_ERRLOG("spdk_sock_getaddr() failed of tqpair=%p\n", tqpair);
1359 0 : nvmf_tcp_qpair_destroy(tqpair);
1360 0 : return;
1361 : }
1362 :
1363 0 : spdk_nvmf_tgt_new_qpair(transport->tgt, &tqpair->qpair);
1364 : }
1365 :
1366 : static uint32_t
1367 0 : nvmf_tcp_port_accept(struct spdk_nvmf_transport *transport, struct spdk_nvmf_tcp_port *port)
1368 : {
1369 : struct spdk_sock *sock;
1370 0 : uint32_t count = 0;
1371 : int i;
1372 :
1373 0 : for (i = 0; i < NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME; i++) {
1374 0 : sock = spdk_sock_accept(port->listen_sock);
1375 0 : if (sock == NULL) {
1376 0 : break;
1377 : }
1378 0 : count++;
1379 0 : nvmf_tcp_handle_connect(transport, port, sock);
1380 : }
1381 :
1382 0 : return count;
1383 : }
1384 :
1385 : static int
1386 0 : nvmf_tcp_accept(void *ctx)
1387 : {
1388 0 : struct spdk_nvmf_transport *transport = ctx;
1389 : struct spdk_nvmf_tcp_transport *ttransport;
1390 : struct spdk_nvmf_tcp_port *port;
1391 0 : uint32_t count = 0;
1392 :
1393 0 : ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
1394 :
1395 0 : TAILQ_FOREACH(port, &ttransport->ports, link) {
1396 0 : count += nvmf_tcp_port_accept(transport, port);
1397 : }
1398 :
1399 0 : return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
1400 : }
1401 :
1402 : static void
1403 0 : nvmf_tcp_discover(struct spdk_nvmf_transport *transport,
1404 : struct spdk_nvme_transport_id *trid,
1405 : struct spdk_nvmf_discovery_log_page_entry *entry)
1406 : {
1407 : struct spdk_nvmf_tcp_port *port;
1408 : struct spdk_nvmf_tcp_transport *ttransport;
1409 :
1410 0 : entry->trtype = SPDK_NVMF_TRTYPE_TCP;
1411 0 : entry->adrfam = trid->adrfam;
1412 :
1413 0 : spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
1414 0 : spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
1415 :
1416 0 : ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
1417 0 : port = nvmf_tcp_find_port(ttransport, trid);
1418 :
1419 0 : assert(port != NULL);
1420 :
1421 0 : if (strcmp(spdk_sock_get_impl_name(port->listen_sock), "ssl") == 0) {
1422 0 : entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_REQUIRED;
1423 0 : entry->tsas.tcp.sectype = SPDK_NVME_TCP_SECURITY_TLS_1_3;
1424 : } else {
1425 0 : entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED;
1426 0 : entry->tsas.tcp.sectype = SPDK_NVME_TCP_SECURITY_NONE;
1427 : }
1428 0 : }
1429 :
1430 : static struct spdk_nvmf_tcp_control_msg_list *
1431 1 : nvmf_tcp_control_msg_list_create(uint16_t num_messages)
1432 : {
1433 : struct spdk_nvmf_tcp_control_msg_list *list;
1434 : struct spdk_nvmf_tcp_control_msg *msg;
1435 : uint16_t i;
1436 :
1437 1 : list = calloc(1, sizeof(*list));
1438 1 : if (!list) {
1439 0 : SPDK_ERRLOG("Failed to allocate memory for list structure\n");
1440 0 : return NULL;
1441 : }
1442 :
1443 1 : list->msg_buf = spdk_zmalloc(num_messages * SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE,
1444 : NVMF_DATA_BUFFER_ALIGNMENT, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
1445 1 : if (!list->msg_buf) {
1446 0 : SPDK_ERRLOG("Failed to allocate memory for control message buffers\n");
1447 0 : free(list);
1448 0 : return NULL;
1449 : }
1450 :
1451 1 : STAILQ_INIT(&list->free_msgs);
1452 :
1453 33 : for (i = 0; i < num_messages; i++) {
1454 32 : msg = (struct spdk_nvmf_tcp_control_msg *)((char *)list->msg_buf + i *
1455 : SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE);
1456 32 : STAILQ_INSERT_TAIL(&list->free_msgs, msg, link);
1457 : }
1458 :
1459 1 : return list;
1460 : }
1461 :
1462 : static void
1463 1 : nvmf_tcp_control_msg_list_free(struct spdk_nvmf_tcp_control_msg_list *list)
1464 : {
1465 1 : if (!list) {
1466 0 : return;
1467 : }
1468 :
1469 1 : spdk_free(list->msg_buf);
1470 1 : free(list);
1471 : }
1472 :
1473 : static struct spdk_nvmf_transport_poll_group *
1474 1 : nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport,
1475 : struct spdk_nvmf_poll_group *group)
1476 : {
1477 : struct spdk_nvmf_tcp_transport *ttransport;
1478 : struct spdk_nvmf_tcp_poll_group *tgroup;
1479 :
1480 1 : tgroup = calloc(1, sizeof(*tgroup));
1481 1 : if (!tgroup) {
1482 0 : return NULL;
1483 : }
1484 :
1485 1 : tgroup->sock_group = spdk_sock_group_create(&tgroup->group);
1486 1 : if (!tgroup->sock_group) {
1487 0 : goto cleanup;
1488 : }
1489 :
1490 1 : TAILQ_INIT(&tgroup->qpairs);
1491 1 : TAILQ_INIT(&tgroup->await_req);
1492 :
1493 1 : ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
1494 :
1495 1 : if (transport->opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
1496 1 : SPDK_DEBUGLOG(nvmf_tcp, "ICD %u is less than min required for admin/fabric commands (%u). "
1497 : "Creating control messages list\n", transport->opts.in_capsule_data_size,
1498 : SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE);
1499 1 : tgroup->control_msg_list = nvmf_tcp_control_msg_list_create(ttransport->tcp_opts.control_msg_num);
1500 1 : if (!tgroup->control_msg_list) {
1501 0 : goto cleanup;
1502 : }
1503 : }
1504 :
1505 1 : tgroup->accel_channel = spdk_accel_get_io_channel();
1506 1 : if (spdk_unlikely(!tgroup->accel_channel)) {
1507 0 : SPDK_ERRLOG("Cannot create accel_channel for tgroup=%p\n", tgroup);
1508 0 : goto cleanup;
1509 : }
1510 :
1511 1 : TAILQ_INSERT_TAIL(&ttransport->poll_groups, tgroup, link);
1512 1 : if (ttransport->next_pg == NULL) {
1513 1 : ttransport->next_pg = tgroup;
1514 : }
1515 :
1516 1 : return &tgroup->group;
1517 :
1518 0 : cleanup:
1519 0 : nvmf_tcp_poll_group_destroy(&tgroup->group);
1520 0 : return NULL;
1521 : }
1522 :
1523 : static struct spdk_nvmf_transport_poll_group *
1524 0 : nvmf_tcp_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
1525 : {
1526 : struct spdk_nvmf_tcp_transport *ttransport;
1527 : struct spdk_nvmf_tcp_poll_group **pg;
1528 : struct spdk_nvmf_tcp_qpair *tqpair;
1529 0 : struct spdk_sock_group *group = NULL, *hint = NULL;
1530 : int rc;
1531 :
1532 0 : ttransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_tcp_transport, transport);
1533 :
1534 0 : if (TAILQ_EMPTY(&ttransport->poll_groups)) {
1535 0 : return NULL;
1536 : }
1537 :
1538 0 : pg = &ttransport->next_pg;
1539 0 : assert(*pg != NULL);
1540 0 : hint = (*pg)->sock_group;
1541 :
1542 0 : tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
1543 0 : rc = spdk_sock_get_optimal_sock_group(tqpair->sock, &group, hint);
1544 0 : if (rc != 0) {
1545 0 : return NULL;
1546 0 : } else if (group != NULL) {
1547 : /* Optimal poll group was found */
1548 0 : return spdk_sock_group_get_ctx(group);
1549 : }
1550 :
1551 : /* The hint was used for optimal poll group, advance next_pg. */
1552 0 : *pg = TAILQ_NEXT(*pg, link);
1553 0 : if (*pg == NULL) {
1554 0 : *pg = TAILQ_FIRST(&ttransport->poll_groups);
1555 : }
1556 :
1557 0 : return spdk_sock_group_get_ctx(hint);
1558 : }
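/*
 * Editorial note: when the sock layer reports no optimal group (group ==
 * NULL), the current next_pg hint is used and then advanced, so new qpairs
 * fall back to plain round-robin placement across the poll groups.
 */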
1559 :
1560 : static void
1561 1 : nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
1562 : {
1563 : struct spdk_nvmf_tcp_poll_group *tgroup, *next_tgroup;
1564 : struct spdk_nvmf_tcp_transport *ttransport;
1565 :
1566 1 : tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
1567 1 : spdk_sock_group_close(&tgroup->sock_group);
1568 1 : if (tgroup->control_msg_list) {
1569 1 : nvmf_tcp_control_msg_list_free(tgroup->control_msg_list);
1570 : }
1571 :
1572 1 : if (tgroup->accel_channel) {
1573 1 : spdk_put_io_channel(tgroup->accel_channel);
1574 : }
1575 :
1576 1 : if (tgroup->group.transport == NULL) {
1577 : /* Transport can be NULL when nvmf_tcp_poll_group_create()
1578 : * calls this function directly in a failure path. */
1579 0 : free(tgroup);
1580 0 : return;
1581 : }
1582 :
1583 1 : ttransport = SPDK_CONTAINEROF(tgroup->group.transport, struct spdk_nvmf_tcp_transport, transport);
1584 :
1585 1 : next_tgroup = TAILQ_NEXT(tgroup, link);
1586 1 : TAILQ_REMOVE(&ttransport->poll_groups, tgroup, link);
1587 1 : if (next_tgroup == NULL) {
1588 1 : next_tgroup = TAILQ_FIRST(&ttransport->poll_groups);
1589 : }
1590 1 : if (ttransport->next_pg == tgroup) {
1591 1 : ttransport->next_pg = next_tgroup;
1592 : }
1593 :
1594 1 : free(tgroup);
1595 : }
1596 :
1597 : static void
1598 36 : nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair,
1599 : enum nvme_tcp_pdu_recv_state state)
1600 : {
1601 36 : if (tqpair->recv_state == state) {
1602 18 : SPDK_ERRLOG("The recv state of tqpair=%p is same with the state(%d) to be set\n",
1603 : tqpair, state);
1604 18 : return;
1605 : }
1606 :
1607 18 : if (spdk_unlikely(state == NVME_TCP_PDU_RECV_STATE_QUIESCING)) {
1608 13 : if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH && tqpair->pdu_in_progress) {
1609 10 : SLIST_INSERT_HEAD(&tqpair->tcp_pdu_free_queue, tqpair->pdu_in_progress, slist);
1610 10 : tqpair->tcp_pdu_working_count--;
1611 : }
1612 : }
1613 :
1614 18 : if (spdk_unlikely(state == NVME_TCP_PDU_RECV_STATE_ERROR)) {
1615 0 : assert(tqpair->tcp_pdu_working_count == 0);
1616 : }
1617 :
1618 18 : if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) {
1619 : /* When leaving the await req state, move the qpair to the main list */
1620 0 : TAILQ_REMOVE(&tqpair->group->await_req, tqpair, link);
1621 0 : TAILQ_INSERT_TAIL(&tqpair->group->qpairs, tqpair, link);
1622 18 : } else if (state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) {
1623 0 : TAILQ_REMOVE(&tqpair->group->qpairs, tqpair, link);
1624 0 : TAILQ_INSERT_TAIL(&tqpair->group->await_req, tqpair, link);
1625 : }
1626 :
1627 18 : SPDK_DEBUGLOG(nvmf_tcp, "tqpair(%p) recv state=%d\n", tqpair, state);
1628 18 : tqpair->recv_state = state;
1629 :
1630 18 : spdk_trace_record(TRACE_TCP_QP_RCV_STATE_CHANGE, tqpair->qpair.trace_id, 0, 0,
1631 : (uint64_t)tqpair->recv_state);
1632 : }
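/* List bookkeeping note: qpairs in the AWAIT_REQ state (waiting only for a
 * free request slot) are kept on the poll group's await_req list rather than
 * the main qpairs list; entering or leaving that state moves the qpair
 * between the two lists as shown above.
 */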
1633 :
1634 : static int
1635 0 : nvmf_tcp_qpair_handle_timeout(void *ctx)
1636 : {
1637 0 : struct spdk_nvmf_tcp_qpair *tqpair = ctx;
1638 :
1639 0 : assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
1640 :
1641 0 : SPDK_ERRLOG("No pdu coming for tqpair=%p within %d seconds\n", tqpair,
1642 : SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT);
1643 :
1644 0 : nvmf_tcp_qpair_disconnect(tqpair);
1645 0 : return SPDK_POLLER_BUSY;
1646 : }
1647 :
1648 : static void
1649 0 : nvmf_tcp_send_c2h_term_req_complete(void *cb_arg)
1650 : {
1651 0 : struct spdk_nvmf_tcp_qpair *tqpair = (struct spdk_nvmf_tcp_qpair *)cb_arg;
1652 :
1653 0 : if (!tqpair->timeout_poller) {
1654 0 : tqpair->timeout_poller = SPDK_POLLER_REGISTER(nvmf_tcp_qpair_handle_timeout, tqpair,
1655 : SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT * 1000000);
1656 : }
1657 0 : }
1658 :
1659 : static void
1660 15 : nvmf_tcp_send_c2h_term_req(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
1661 : enum spdk_nvme_tcp_term_req_fes fes, uint32_t error_offset)
1662 : {
1663 : struct nvme_tcp_pdu *rsp_pdu;
1664 : struct spdk_nvme_tcp_term_req_hdr *c2h_term_req;
1665 15 : uint32_t c2h_term_req_hdr_len = sizeof(*c2h_term_req);
1666 : uint32_t copy_len;
1667 :
1668 15 : rsp_pdu = tqpair->mgmt_pdu;
1669 :
1670 15 : c2h_term_req = &rsp_pdu->hdr.term_req;
1671 15 : c2h_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
1672 15 : c2h_term_req->common.hlen = c2h_term_req_hdr_len;
1673 15 : c2h_term_req->fes = fes;
1674 :
1675 15 : if ((fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
1676 : (fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
1677 12 : DSET32(&c2h_term_req->fei, error_offset);
1678 : }
1679 :
1680 15 : copy_len = spdk_min(pdu->hdr.common.hlen, SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
1681 :
1682 : /* Copy the error info into the buffer */
1683 15 : memcpy((uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, pdu->hdr.raw, copy_len);
1684 15 : nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, copy_len);
1685 :
1686 : /* The C2H term req PDU carries the header of the offending PDU */
1687 15 : c2h_term_req->common.plen = c2h_term_req->common.hlen + copy_len;
1688 15 : tqpair->wait_terminate = true;
1689 15 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
1690 15 : nvmf_tcp_qpair_write_mgmt_pdu(tqpair, nvmf_tcp_send_c2h_term_req_complete, tqpair);
1691 15 : }
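/* FEI example: for an ICReq with a bad PFV, the caller passes
 * fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD and
 * error_offset = offsetof(struct spdk_nvme_tcp_ic_req, pfv), so DSET32()
 * above lets the host locate the offending field. The leading bytes of the
 * bad PDU header (at most SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE) are
 * echoed back as the term req payload.
 */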
1692 :
1693 : static void
1694 1 : nvmf_tcp_capsule_cmd_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
1695 : struct spdk_nvmf_tcp_qpair *tqpair,
1696 : struct nvme_tcp_pdu *pdu)
1697 : {
1698 : struct spdk_nvmf_tcp_req *tcp_req;
1699 :
1700 1 : assert(pdu->psh_valid_bytes == pdu->psh_len);
1701 1 : assert(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
1702 :
1703 1 : tcp_req = nvmf_tcp_req_get(tqpair);
1704 1 : if (!tcp_req) {
1705 : /* Directly return and make the allocation retry again. This can happen if we're
1706 : * using asynchronous writes to send the response to the host or when releasing
1707 : * zero-copy buffers after a response has been sent. In both cases, the host might
1708 : * receive the response before we've finished processing the request and is free to
1709 : * send another one.
1710 : */
1711 0 : if (tqpair->state_cntr[TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST] > 0 ||
1712 0 : tqpair->state_cntr[TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE] > 0) {
1713 0 : return;
1714 : }
1715 :
1716 : /* The host sent more commands than the maximum queue depth. */
1717 0 : SPDK_ERRLOG("Cannot allocate tcp_req on tqpair=%p\n", tqpair);
1718 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
1719 0 : return;
1720 : }
1721 :
1722 1 : pdu->req = tcp_req;
1723 1 : assert(tcp_req->state == TCP_REQUEST_STATE_NEW);
1724 1 : nvmf_tcp_req_process(ttransport, tcp_req);
1725 : }
1726 :
1727 : static void
1728 0 : nvmf_tcp_capsule_cmd_payload_handle(struct spdk_nvmf_tcp_transport *ttransport,
1729 : struct spdk_nvmf_tcp_qpair *tqpair,
1730 : struct nvme_tcp_pdu *pdu)
1731 : {
1732 : struct spdk_nvmf_tcp_req *tcp_req;
1733 : struct spdk_nvme_tcp_cmd *capsule_cmd;
1734 0 : uint32_t error_offset = 0;
1735 : enum spdk_nvme_tcp_term_req_fes fes;
1736 : struct spdk_nvme_cpl *rsp;
1737 :
1738 0 : capsule_cmd = &pdu->hdr.capsule_cmd;
1739 0 : tcp_req = pdu->req;
1740 0 : assert(tcp_req != NULL);
1741 :
1742 : /* Zero-copy requests don't support ICD */
1743 0 : assert(!spdk_nvmf_request_using_zcopy(&tcp_req->req));
1744 :
1745 0 : if (capsule_cmd->common.pdo > SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET) {
1746 0 : SPDK_ERRLOG("Expected ICReq capsule_cmd pdu offset <= %d, got %c\n",
1747 : SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET, capsule_cmd->common.pdo);
1748 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1749 0 : error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdo);
1750 0 : goto err;
1751 : }
1752 :
1753 0 : rsp = &tcp_req->req.rsp->nvme_cpl;
1754 0 : if (spdk_unlikely(rsp->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR)) {
1755 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
1756 : } else {
1757 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
1758 : }
1759 :
1760 0 : nvmf_tcp_req_process(ttransport, tcp_req);
1761 :
1762 0 : return;
1763 0 : err:
1764 0 : nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
1765 : }
1766 :
1767 : static void
1768 1 : nvmf_tcp_h2c_data_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
1769 : struct spdk_nvmf_tcp_qpair *tqpair,
1770 : struct nvme_tcp_pdu *pdu)
1771 : {
1772 : struct spdk_nvmf_tcp_req *tcp_req;
1773 1 : uint32_t error_offset = 0;
1774 1 : enum spdk_nvme_tcp_term_req_fes fes = 0;
1775 : struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
1776 :
1777 1 : h2c_data = &pdu->hdr.h2c_data;
1778 :
1779 1 : SPDK_DEBUGLOG(nvmf_tcp, "tqpair=%p, r2t_info: datao=%u, datal=%u, cccid=%u, ttag=%u\n",
1780 : tqpair, h2c_data->datao, h2c_data->datal, h2c_data->cccid, h2c_data->ttag);
1781 :
1782 1 : if (h2c_data->ttag > tqpair->resource_count) {
1783 0 : SPDK_DEBUGLOG(nvmf_tcp, "ttag %u is larger than allowed %u.\n", h2c_data->ttag,
1784 : tqpair->resource_count);
1785 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
1786 0 : error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, ttag);
1787 0 : goto err;
1788 : }
1789 :
1790 1 : tcp_req = &tqpair->reqs[h2c_data->ttag - 1];
1791 :
1792 1 : if (spdk_unlikely(tcp_req->state != TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER &&
1793 : tcp_req->state != TCP_REQUEST_STATE_AWAITING_R2T_ACK)) {
1794 0 : SPDK_DEBUGLOG(nvmf_tcp, "tcp_req(%p), tqpair=%p, has error state in %d\n", tcp_req, tqpair,
1795 : tcp_req->state);
1796 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1797 0 : error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, ttag);
1798 0 : goto err;
1799 : }
1800 :
1801 1 : if (spdk_unlikely(tcp_req->req.cmd->nvme_cmd.cid != h2c_data->cccid)) {
1802 0 : SPDK_DEBUGLOG(nvmf_tcp, "tcp_req(%p), tqpair=%p, expected %u but %u for cccid.\n", tcp_req, tqpair,
1803 : tcp_req->req.cmd->nvme_cmd.cid, h2c_data->cccid);
1804 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
1805 0 : error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, cccid);
1806 0 : goto err;
1807 : }
1808 :
1809 1 : if (tcp_req->h2c_offset != h2c_data->datao) {
1810 0 : SPDK_DEBUGLOG(nvmf_tcp,
1811 : "tcp_req(%p), tqpair=%p, expected data offset %u, but data offset is %u\n",
1812 : tcp_req, tqpair, tcp_req->h2c_offset, h2c_data->datao);
1813 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
1814 0 : goto err;
1815 : }
1816 :
1817 1 : if ((h2c_data->datao + h2c_data->datal) > tcp_req->req.length) {
1818 0 : SPDK_DEBUGLOG(nvmf_tcp,
1819 : "tcp_req(%p), tqpair=%p, (datao=%u + datal=%u) exceeds requested length=%u\n",
1820 : tcp_req, tqpair, h2c_data->datao, h2c_data->datal, tcp_req->req.length);
1821 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
1822 0 : goto err;
1823 : }
1824 :
1825 1 : pdu->req = tcp_req;
1826 :
1827 1 : if (spdk_unlikely(tcp_req->req.dif_enabled)) {
1828 0 : pdu->dif_ctx = &tcp_req->req.dif.dif_ctx;
1829 : }
1830 :
1831 1 : nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt,
1832 : h2c_data->datao, h2c_data->datal);
1833 1 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
1834 1 : return;
1835 :
1836 0 : err:
1837 0 : nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
1838 : }
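/* Summary of the H2C validation above: the ttag must index a valid request
 * slot (ttags are 1-based, hence reqs[ttag - 1]), the request must currently
 * be expecting host data, the cccid must match the command the transfer was
 * started for, datao must continue exactly at h2c_offset, and
 * datao + datal must stay within the requested transfer length.
 */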
1839 :
1840 : static void
1841 3 : nvmf_tcp_send_capsule_resp_pdu(struct spdk_nvmf_tcp_req *tcp_req,
1842 : struct spdk_nvmf_tcp_qpair *tqpair)
1843 : {
1844 : struct nvme_tcp_pdu *rsp_pdu;
1845 : struct spdk_nvme_tcp_rsp *capsule_resp;
1846 :
1847 3 : SPDK_DEBUGLOG(nvmf_tcp, "enter, tqpair=%p\n", tqpair);
1848 :
1849 3 : rsp_pdu = nvmf_tcp_req_pdu_init(tcp_req);
1850 3 : assert(rsp_pdu != NULL);
1851 :
1852 3 : capsule_resp = &rsp_pdu->hdr.capsule_resp;
1853 3 : capsule_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
1854 3 : capsule_resp->common.plen = capsule_resp->common.hlen = sizeof(*capsule_resp);
1855 3 : capsule_resp->rccqe = tcp_req->req.rsp->nvme_cpl;
1856 3 : if (tqpair->host_hdgst_enable) {
1857 1 : capsule_resp->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1858 1 : capsule_resp->common.plen += SPDK_NVME_TCP_DIGEST_LEN;
1859 : }
1860 :
1861 3 : nvmf_tcp_qpair_write_req_pdu(tqpair, tcp_req, nvmf_tcp_request_free, tcp_req);
1862 3 : }
1863 :
1864 : static void
1865 0 : nvmf_tcp_pdu_c2h_data_complete(void *cb_arg)
1866 : {
1867 0 : struct spdk_nvmf_tcp_req *tcp_req = cb_arg;
1868 0 : struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair,
1869 : struct spdk_nvmf_tcp_qpair, qpair);
1870 :
1871 0 : assert(tqpair != NULL);
1872 :
1873 0 : if (spdk_unlikely(tcp_req->pdu->rw_offset < tcp_req->req.length)) {
1874 0 : SPDK_DEBUGLOG(nvmf_tcp, "sending another C2H part, offset %u length %u\n", tcp_req->pdu->rw_offset,
1875 : tcp_req->req.length);
1876 0 : _nvmf_tcp_send_c2h_data(tqpair, tcp_req);
1877 0 : return;
1878 : }
1879 :
1880 0 : if (tcp_req->pdu->hdr.c2h_data.common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) {
1881 0 : nvmf_tcp_request_free(tcp_req);
1882 : } else {
1883 0 : nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair);
1884 : }
1885 : }
1886 :
1887 : static void
1888 0 : nvmf_tcp_r2t_complete(void *cb_arg)
1889 : {
1890 0 : struct spdk_nvmf_tcp_req *tcp_req = cb_arg;
1891 : struct spdk_nvmf_tcp_transport *ttransport;
1892 :
1893 0 : ttransport = SPDK_CONTAINEROF(tcp_req->req.qpair->transport,
1894 : struct spdk_nvmf_tcp_transport, transport);
1895 :
1896 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
1897 :
1898 0 : if (tcp_req->h2c_offset == tcp_req->req.length) {
1899 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
1900 0 : nvmf_tcp_req_process(ttransport, tcp_req);
1901 : }
1902 0 : }
1903 :
1904 : static void
1905 0 : nvmf_tcp_send_r2t_pdu(struct spdk_nvmf_tcp_qpair *tqpair,
1906 : struct spdk_nvmf_tcp_req *tcp_req)
1907 : {
1908 : struct nvme_tcp_pdu *rsp_pdu;
1909 : struct spdk_nvme_tcp_r2t_hdr *r2t;
1910 :
1911 0 : rsp_pdu = nvmf_tcp_req_pdu_init(tcp_req);
1912 0 : assert(rsp_pdu != NULL);
1913 :
1914 0 : r2t = &rsp_pdu->hdr.r2t;
1915 0 : r2t->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T;
1916 0 : r2t->common.plen = r2t->common.hlen = sizeof(*r2t);
1917 :
1918 0 : if (tqpair->host_hdgst_enable) {
1919 0 : r2t->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1920 0 : r2t->common.plen += SPDK_NVME_TCP_DIGEST_LEN;
1921 : }
1922 :
1923 0 : r2t->cccid = tcp_req->req.cmd->nvme_cmd.cid;
1924 0 : r2t->ttag = tcp_req->ttag;
1925 0 : r2t->r2to = tcp_req->h2c_offset;
1926 0 : r2t->r2tl = tcp_req->req.length;
1927 :
1928 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_R2T_ACK);
1929 :
1930 0 : SPDK_DEBUGLOG(nvmf_tcp,
1931 : "tcp_req(%p) on tqpair(%p), r2t_info: cccid=%u, ttag=%u, r2to=%u, r2tl=%u\n",
1932 : tcp_req, tqpair, r2t->cccid, r2t->ttag, r2t->r2to, r2t->r2tl);
1933 0 : nvmf_tcp_qpair_write_req_pdu(tqpair, tcp_req, nvmf_tcp_r2t_complete, tcp_req);
1934 0 : }
1935 :
1936 : static void
1937 0 : nvmf_tcp_h2c_data_payload_handle(struct spdk_nvmf_tcp_transport *ttransport,
1938 : struct spdk_nvmf_tcp_qpair *tqpair,
1939 : struct nvme_tcp_pdu *pdu)
1940 : {
1941 : struct spdk_nvmf_tcp_req *tcp_req;
1942 : struct spdk_nvme_cpl *rsp;
1943 :
1944 0 : tcp_req = pdu->req;
1945 0 : assert(tcp_req != NULL);
1946 :
1947 0 : SPDK_DEBUGLOG(nvmf_tcp, "enter\n");
1948 :
1949 0 : tcp_req->h2c_offset += pdu->data_len;
1950 :
1951 : /* Wait for all of the data to arrive AND for the initial R2T PDU send to be
1952 : * acknowledged before moving on. */
1953 0 : if (tcp_req->h2c_offset == tcp_req->req.length &&
1954 0 : tcp_req->state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER) {
1955 : /* After receiving all the h2c data, we need to check whether there is
1956 : * a transient transport error */
1957 0 : rsp = &tcp_req->req.rsp->nvme_cpl;
1958 0 : if (spdk_unlikely(rsp->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR)) {
1959 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
1960 : } else {
1961 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
1962 : }
1963 0 : nvmf_tcp_req_process(ttransport, tcp_req);
1964 : }
1965 0 : }
1966 :
1967 : static void
1968 0 : nvmf_tcp_h2c_term_req_dump(struct spdk_nvme_tcp_term_req_hdr *h2c_term_req)
1969 : {
1970 0 : SPDK_ERRLOG("Error info of pdu(%p): %s\n", h2c_term_req,
1971 : spdk_nvmf_tcp_term_req_fes_str[h2c_term_req->fes]);
1972 0 : if ((h2c_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) ||
1973 0 : (h2c_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) {
1974 0 : SPDK_DEBUGLOG(nvmf_tcp, "The offset from the start of the PDU header is %u\n",
1975 : DGET32(h2c_term_req->fei));
1976 : }
1977 0 : }
1978 :
1979 : static void
1980 0 : nvmf_tcp_h2c_term_req_hdr_handle(struct spdk_nvmf_tcp_qpair *tqpair,
1981 : struct nvme_tcp_pdu *pdu)
1982 : {
1983 0 : struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req;
1984 0 : uint32_t error_offset = 0;
1985 : enum spdk_nvme_tcp_term_req_fes fes;
1986 :
1987 0 : if (h2c_term_req->fes > SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER) {
1988 0 : SPDK_ERRLOG("Fatal Error Status(FES) is unknown for h2c_term_req pdu=%p\n", pdu);
1989 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1990 0 : error_offset = offsetof(struct spdk_nvme_tcp_term_req_hdr, fes);
1991 0 : goto end;
1992 : }
1993 :
1994 : /* set the data buffer */
1995 0 : nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr.raw + h2c_term_req->common.hlen,
1996 0 : h2c_term_req->common.plen - h2c_term_req->common.hlen);
1997 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
1998 0 : return;
1999 0 : end:
2000 0 : nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
2001 : }
2002 :
2003 : static void
2004 0 : nvmf_tcp_h2c_term_req_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair,
2005 : struct nvme_tcp_pdu *pdu)
2006 : {
2007 0 : struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req;
2008 :
2009 0 : nvmf_tcp_h2c_term_req_dump(h2c_term_req);
2010 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
2011 0 : }
2012 :
2013 : static void
2014 0 : _nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
2015 : {
2016 0 : struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport,
2017 : struct spdk_nvmf_tcp_transport, transport);
2018 :
2019 0 : switch (pdu->hdr.common.pdu_type) {
2020 0 : case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD:
2021 0 : nvmf_tcp_capsule_cmd_payload_handle(ttransport, tqpair, pdu);
2022 0 : break;
2023 0 : case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA:
2024 0 : nvmf_tcp_h2c_data_payload_handle(ttransport, tqpair, pdu);
2025 0 : break;
2026 :
2027 0 : case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ:
2028 0 : nvmf_tcp_h2c_term_req_payload_handle(tqpair, pdu);
2029 0 : break;
2030 :
2031 0 : default:
2032 : /* The code should not reach here */
2033 0 : SPDK_ERRLOG("ERROR pdu type %d\n", pdu->hdr.common.pdu_type);
2034 0 : break;
2035 : }
2036 0 : SLIST_INSERT_HEAD(&tqpair->tcp_pdu_free_queue, pdu, slist);
2037 0 : tqpair->tcp_pdu_working_count--;
2038 0 : }
2039 :
2040 : static inline void
2041 1 : nvmf_tcp_req_set_cpl(struct spdk_nvmf_tcp_req *treq, int sct, int sc)
2042 : {
2043 1 : treq->req.rsp->nvme_cpl.status.sct = sct;
2044 1 : treq->req.rsp->nvme_cpl.status.sc = sc;
2045 1 : treq->req.rsp->nvme_cpl.cid = treq->req.cmd->nvme_cmd.cid;
2046 1 : }
2047 :
2048 : static void
2049 0 : data_crc32_calc_done(void *cb_arg, int status)
2050 : {
2051 0 : struct nvme_tcp_pdu *pdu = cb_arg;
2052 0 : struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair;
2053 :
2054 : /* The async crc32 calculation failed; fall back to direct calculation to check */
2055 0 : if (spdk_unlikely(status)) {
2056 0 : SPDK_ERRLOG("Data digest on tqpair=(%p) with pdu=%p failed to be calculated asynchronously\n",
2057 : tqpair, pdu);
2058 0 : pdu->data_digest_crc32 = nvme_tcp_pdu_calc_data_digest(pdu);
2059 : }
2060 0 : pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
2061 0 : if (!MATCH_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32)) {
2062 0 : SPDK_ERRLOG("Data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
2063 0 : assert(pdu->req != NULL);
2064 0 : nvmf_tcp_req_set_cpl(pdu->req, SPDK_NVME_SCT_GENERIC,
2065 : SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR);
2066 : }
2067 0 : _nvmf_tcp_pdu_payload_handle(tqpair, pdu);
2068 0 : }
2069 :
2070 : static void
2071 0 : nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
2072 : {
2073 0 : int rc = 0;
2074 0 : assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
2075 0 : tqpair->pdu_in_progress = NULL;
2076 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
2077 0 : SPDK_DEBUGLOG(nvmf_tcp, "enter\n");
2078 : /* check the data digest if needed */
2079 0 : if (pdu->ddgst_enable) {
2080 0 : if (tqpair->qpair.qid != 0 && !pdu->dif_ctx && tqpair->group &&
2081 0 : (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0)) {
2082 0 : rc = spdk_accel_submit_crc32cv(tqpair->group->accel_channel, &pdu->data_digest_crc32, pdu->data_iov,
2083 : pdu->data_iovcnt, 0, data_crc32_calc_done, pdu);
2084 0 : if (spdk_likely(rc == 0)) {
2085 0 : return;
2086 : }
2087 : } else {
2088 0 : pdu->data_digest_crc32 = nvme_tcp_pdu_calc_data_digest(pdu);
2089 : }
2090 0 : data_crc32_calc_done(pdu, rc);
2091 : } else {
2092 0 : _nvmf_tcp_pdu_payload_handle(tqpair, pdu);
2093 : }
2094 : }
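/* Offload decision note: the accel framework is only asked to compute the
 * CRC32C for I/O queues (qid != 0) when no DIF context is attached and the
 * payload length is a multiple of SPDK_NVME_TCP_DIGEST_ALIGNMENT; otherwise
 * the digest is computed inline and the shared completion path,
 * data_crc32_calc_done(), performs the verification.
 */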
2095 :
2096 : static void
2097 0 : nvmf_tcp_send_icresp_complete(void *cb_arg)
2098 : {
2099 0 : struct spdk_nvmf_tcp_qpair *tqpair = cb_arg;
2100 :
2101 0 : nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_RUNNING);
2102 0 : }
2103 :
2104 : static void
2105 3 : nvmf_tcp_icreq_handle(struct spdk_nvmf_tcp_transport *ttransport,
2106 : struct spdk_nvmf_tcp_qpair *tqpair,
2107 : struct nvme_tcp_pdu *pdu)
2108 : {
2109 3 : struct spdk_nvme_tcp_ic_req *ic_req = &pdu->hdr.ic_req;
2110 : struct nvme_tcp_pdu *rsp_pdu;
2111 : struct spdk_nvme_tcp_ic_resp *ic_resp;
2112 3 : uint32_t error_offset = 0;
2113 : enum spdk_nvme_tcp_term_req_fes fes;
2114 :
2115 : /* Only PFV 0 is defined currently */
2116 3 : if (ic_req->pfv != 0) {
2117 2 : SPDK_ERRLOG("Expected ICReq PFV %u, got %u\n", 0u, ic_req->pfv);
2118 2 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
2119 2 : error_offset = offsetof(struct spdk_nvme_tcp_ic_req, pfv);
2120 2 : goto end;
2121 : }
2122 :
2123 : /* This value is a 0's based value in units of dwords and should not be larger than SPDK_NVME_TCP_HPDA_MAX */
2124 1 : if (ic_req->hpda > SPDK_NVME_TCP_HPDA_MAX) {
2125 0 : SPDK_ERRLOG("ICReq HPDA out of range 0 to 31, got %u\n", ic_req->hpda);
2126 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
2127 0 : error_offset = offsetof(struct spdk_nvme_tcp_ic_req, hpda);
2128 0 : goto end;
2129 : }
2130 :
2131 : /* MAXR2T is 0's based */
2132 1 : SPDK_DEBUGLOG(nvmf_tcp, "maxr2t =%u\n", (ic_req->maxr2t + 1u));
2133 :
2134 1 : tqpair->host_hdgst_enable = ic_req->dgst.bits.hdgst_enable ? true : false;
2135 1 : if (!tqpair->host_hdgst_enable) {
2136 1 : tqpair->recv_buf_size -= SPDK_NVME_TCP_DIGEST_LEN * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR;
2137 : }
2138 :
2139 1 : tqpair->host_ddgst_enable = ic_req->dgst.bits.ddgst_enable ? true : false;
2140 1 : if (!tqpair->host_ddgst_enable) {
2141 1 : tqpair->recv_buf_size -= SPDK_NVME_TCP_DIGEST_LEN * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR;
2142 : }
2143 :
2144 1 : tqpair->recv_buf_size = spdk_max(tqpair->recv_buf_size, MIN_SOCK_PIPE_SIZE);
2145 : /* Now that we know whether digests are enabled, properly size the receive buffer */
2146 1 : if (spdk_sock_set_recvbuf(tqpair->sock, tqpair->recv_buf_size) < 0) {
2147 0 : SPDK_WARNLOG("Unable to allocate enough memory for receive buffer on tqpair=%p with size=%d\n",
2148 : tqpair,
2149 : tqpair->recv_buf_size);
2150 : /* Not fatal. */
2151 : }
2152 :
2153 1 : tqpair->cpda = spdk_min(ic_req->hpda, SPDK_NVME_TCP_CPDA_MAX);
2154 1 : SPDK_DEBUGLOG(nvmf_tcp, "cpda of tqpair=(%p) is : %u\n", tqpair, tqpair->cpda);
2155 :
2156 1 : rsp_pdu = tqpair->mgmt_pdu;
2157 :
2158 1 : ic_resp = &rsp_pdu->hdr.ic_resp;
2159 1 : ic_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
2160 1 : ic_resp->common.hlen = ic_resp->common.plen = sizeof(*ic_resp);
2161 1 : ic_resp->pfv = 0;
2162 1 : ic_resp->cpda = tqpair->cpda;
2163 1 : ic_resp->maxh2cdata = ttransport->transport.opts.max_io_size;
2164 1 : ic_resp->dgst.bits.hdgst_enable = tqpair->host_hdgst_enable ? 1 : 0;
2165 1 : ic_resp->dgst.bits.ddgst_enable = tqpair->host_ddgst_enable ? 1 : 0;
2166 :
2167 1 : SPDK_DEBUGLOG(nvmf_tcp, "host_hdgst_enable: %u\n", tqpair->host_hdgst_enable);
2168 1 : SPDK_DEBUGLOG(nvmf_tcp, "host_ddgst_enable: %u\n", tqpair->host_ddgst_enable);
2169 :
2170 1 : nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_INITIALIZING);
2171 1 : nvmf_tcp_qpair_write_mgmt_pdu(tqpair, nvmf_tcp_send_icresp_complete, tqpair);
2172 1 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
2173 1 : return;
2174 2 : end:
2175 2 : nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
2176 : }
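/* Negotiation sketch: header/data digests are enabled only if the host
 * requested them in the ICReq, the controller-to-host PDA is clamped to
 * min(hpda, SPDK_NVME_TCP_CPDA_MAX), and maxh2cdata is advertised as the
 * transport's max_io_size. The qpair stays in INITIALIZING until the ICResp
 * write completes, at which point nvmf_tcp_send_icresp_complete() moves it
 * to RUNNING.
 */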
2177 :
2178 : static void
2179 0 : nvmf_tcp_pdu_psh_handle(struct spdk_nvmf_tcp_qpair *tqpair,
2180 : struct spdk_nvmf_tcp_transport *ttransport)
2181 : {
2182 : struct nvme_tcp_pdu *pdu;
2183 : int rc;
2184 0 : uint32_t crc32c, error_offset = 0;
2185 : enum spdk_nvme_tcp_term_req_fes fes;
2186 :
2187 0 : assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
2188 0 : pdu = tqpair->pdu_in_progress;
2189 :
2190 0 : SPDK_DEBUGLOG(nvmf_tcp, "pdu type of tqpair(%p) is %d\n", tqpair,
2191 : pdu->hdr.common.pdu_type);
2192 : /* check header digest if needed */
2193 0 : if (pdu->has_hdgst) {
2194 0 : SPDK_DEBUGLOG(nvmf_tcp, "Compare the header of pdu=%p on tqpair=%p\n", pdu, tqpair);
2195 0 : crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
2196 0 : rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c);
2197 0 : if (rc == 0) {
2198 0 : SPDK_ERRLOG("Header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
2199 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR;
2200 0 : nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
2201 0 : return;
2202 :
2203 : }
2204 : }
2205 :
2206 0 : switch (pdu->hdr.common.pdu_type) {
2207 0 : case SPDK_NVME_TCP_PDU_TYPE_IC_REQ:
2208 0 : nvmf_tcp_icreq_handle(ttransport, tqpair, pdu);
2209 0 : break;
2210 0 : case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD:
2211 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_REQ);
2212 0 : break;
2213 0 : case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA:
2214 0 : nvmf_tcp_h2c_data_hdr_handle(ttransport, tqpair, pdu);
2215 0 : break;
2216 :
2217 0 : case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ:
2218 0 : nvmf_tcp_h2c_term_req_hdr_handle(tqpair, pdu);
2219 0 : break;
2220 :
2221 0 : default:
2222 0 : SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->pdu_in_progress->hdr.common.pdu_type);
2223 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
2224 0 : error_offset = 1;
2225 0 : nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
2226 0 : break;
2227 : }
2228 : }
2229 :
2230 : static void
2231 11 : nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair)
2232 : {
2233 : struct nvme_tcp_pdu *pdu;
2234 11 : uint32_t error_offset = 0;
2235 : enum spdk_nvme_tcp_term_req_fes fes;
2236 : uint8_t expected_hlen, pdo;
2237 11 : bool plen_error = false, pdo_error = false;
2238 :
2239 11 : assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
2240 11 : pdu = tqpair->pdu_in_progress;
2241 11 : assert(pdu);
2242 11 : if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ) {
2243 4 : if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) {
2244 1 : SPDK_ERRLOG("Already received ICreq PDU, and reject this pdu=%p\n", pdu);
2245 1 : fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
2246 1 : goto err;
2247 : }
2248 3 : expected_hlen = sizeof(struct spdk_nvme_tcp_ic_req);
2249 3 : if (pdu->hdr.common.plen != expected_hlen) {
2250 1 : plen_error = true;
2251 : }
2252 : } else {
2253 7 : if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
2254 1 : SPDK_ERRLOG("The TCP/IP connection is not negotiated\n");
2255 1 : fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
2256 1 : goto err;
2257 : }
2258 :
2259 6 : switch (pdu->hdr.common.pdu_type) {
2260 2 : case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD:
2261 2 : expected_hlen = sizeof(struct spdk_nvme_tcp_cmd);
2262 2 : pdo = pdu->hdr.common.pdo;
2263 2 : if ((tqpair->cpda != 0) && (pdo % ((tqpair->cpda + 1) << 2) != 0)) {
2264 1 : pdo_error = true;
2265 1 : break;
2266 : }
2267 :
2268 1 : if (pdu->hdr.common.plen < expected_hlen) {
2269 1 : plen_error = true;
2270 : }
2271 1 : break;
2272 2 : case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA:
2273 2 : expected_hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
2274 2 : pdo = pdu->hdr.common.pdo;
2275 2 : if ((tqpair->cpda != 0) && (pdo % ((tqpair->cpda + 1) << 2) != 0)) {
2276 1 : pdo_error = true;
2277 1 : break;
2278 : }
2279 1 : if (pdu->hdr.common.plen < expected_hlen) {
2280 1 : plen_error = true;
2281 : }
2282 1 : break;
2283 :
2284 1 : case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ:
2285 1 : expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
2286 1 : if ((pdu->hdr.common.plen <= expected_hlen) ||
2287 0 : (pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
2288 1 : plen_error = true;
2289 : }
2290 1 : break;
2291 :
2292 1 : default:
2293 1 : SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", pdu->hdr.common.pdu_type);
2294 1 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
2295 1 : error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type);
2296 1 : goto err;
2297 : }
2298 : }
2299 :
2300 8 : if (pdu->hdr.common.hlen != expected_hlen) {
2301 1 : SPDK_ERRLOG("PDU type=0x%02x, Expected ICReq header length %u, got %u on tqpair=%p\n",
2302 : pdu->hdr.common.pdu_type,
2303 : expected_hlen, pdu->hdr.common.hlen, tqpair);
2304 1 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
2305 1 : error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen);
2306 1 : goto err;
2307 7 : } else if (pdo_error) {
2308 2 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
2309 2 : error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdo);
2310 5 : } else if (plen_error) {
2311 4 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
2312 4 : error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen);
2313 4 : goto err;
2314 : } else {
2315 1 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
2316 1 : nvme_tcp_pdu_calc_psh_len(tqpair->pdu_in_progress, tqpair->host_hdgst_enable);
2317 1 : return;
2318 : }
2319 10 : err:
2320 10 : nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
2321 : }
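/* CH validation summary: an ICReq is only legal on a brand-new qpair and any
 * other PDU type requires a fully negotiated (RUNNING) connection; hlen must
 * match the type-specific header size exactly, pdo must honor the negotiated
 * cpda alignment for data-bearing PDUs, and plen is bounds-checked per type.
 * Any violation is answered with a C2H term req.
 */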
2322 :
2323 : static int
2324 0 : nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
2325 : {
2326 0 : int rc = 0;
2327 : struct nvme_tcp_pdu *pdu;
2328 : enum nvme_tcp_pdu_recv_state prev_state;
2329 : uint32_t data_len;
2330 0 : struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport,
2331 : struct spdk_nvmf_tcp_transport, transport);
2332 :
2333 : /* The loop here is to allow for several back-to-back state changes. */
2334 : do {
2335 0 : prev_state = tqpair->recv_state;
2336 0 : SPDK_DEBUGLOG(nvmf_tcp, "tqpair(%p) recv pdu entering state %d\n", tqpair, prev_state);
2337 :
2338 0 : pdu = tqpair->pdu_in_progress;
2339 0 : assert(pdu != NULL ||
2340 : tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY ||
2341 : tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING ||
2342 : tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
2343 :
2344 0 : switch (tqpair->recv_state) {
2345 : /* Wait for the common header */
2346 0 : case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
2347 0 : if (!pdu) {
2348 0 : pdu = SLIST_FIRST(&tqpair->tcp_pdu_free_queue);
2349 0 : if (spdk_unlikely(!pdu)) {
2350 0 : return NVME_TCP_PDU_IN_PROGRESS;
2351 : }
2352 0 : SLIST_REMOVE_HEAD(&tqpair->tcp_pdu_free_queue, slist);
2353 0 : tqpair->pdu_in_progress = pdu;
2354 0 : tqpair->tcp_pdu_working_count++;
2355 : }
2356 0 : memset(pdu, 0, offsetof(struct nvme_tcp_pdu, qpair));
2357 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
2358 : /* FALLTHROUGH */
2359 0 : case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
2360 0 : if (spdk_unlikely(tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING)) {
2361 0 : return rc;
2362 : }
2363 :
2364 0 : rc = nvme_tcp_read_data(tqpair->sock,
2365 0 : sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
2366 0 : (void *)&pdu->hdr.common + pdu->ch_valid_bytes);
2367 0 : if (rc < 0) {
2368 0 : SPDK_DEBUGLOG(nvmf_tcp, "will disconnect tqpair=%p\n", tqpair);
2369 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
2370 0 : break;
2371 0 : } else if (rc > 0) {
2372 0 : pdu->ch_valid_bytes += rc;
2373 0 : spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, tqpair->qpair.trace_id, rc, 0);
2374 : }
2375 :
2376 0 : if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
2377 0 : return NVME_TCP_PDU_IN_PROGRESS;
2378 : }
2379 :
2380 : /* The common header of this PDU has now been read from the socket. */
2381 0 : nvmf_tcp_pdu_ch_handle(tqpair);
2382 0 : break;
2383 : /* Wait for the pdu specific header */
2384 0 : case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
2385 0 : rc = nvme_tcp_read_data(tqpair->sock,
2386 0 : pdu->psh_len - pdu->psh_valid_bytes,
2387 0 : (void *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
2388 0 : if (rc < 0) {
2389 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
2390 0 : break;
2391 0 : } else if (rc > 0) {
2392 0 : spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, tqpair->qpair.trace_id, rc, 0);
2393 0 : pdu->psh_valid_bytes += rc;
2394 : }
2395 :
2396 0 : if (pdu->psh_valid_bytes < pdu->psh_len) {
2397 0 : return NVME_TCP_PDU_IN_PROGRESS;
2398 : }
2399 :
2400 : /* The full header (CH, PSH, header digest) of this PDU has now been read from the socket. */
2401 0 : nvmf_tcp_pdu_psh_handle(tqpair, ttransport);
2402 0 : break;
2403 : /* Wait for the req slot */
2404 0 : case NVME_TCP_PDU_RECV_STATE_AWAIT_REQ:
2405 0 : nvmf_tcp_capsule_cmd_hdr_handle(ttransport, tqpair, pdu);
2406 0 : break;
2407 0 : case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
2408 : /* check whether the data length is valid; if not, just return */
2409 0 : if (!pdu->data_len) {
2410 0 : return NVME_TCP_PDU_IN_PROGRESS;
2411 : }
2412 :
2413 0 : data_len = pdu->data_len;
2414 : /* data digest */
2415 0 : if (spdk_unlikely((pdu->hdr.common.pdu_type != SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) &&
2416 : tqpair->host_ddgst_enable)) {
2417 0 : data_len += SPDK_NVME_TCP_DIGEST_LEN;
2418 0 : pdu->ddgst_enable = true;
2419 : }
2420 :
2421 0 : rc = nvme_tcp_read_payload_data(tqpair->sock, pdu);
2422 0 : if (rc < 0) {
2423 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
2424 0 : break;
2425 : }
2426 0 : pdu->rw_offset += rc;
2427 :
2428 0 : if (pdu->rw_offset < data_len) {
2429 0 : return NVME_TCP_PDU_IN_PROGRESS;
2430 : }
2431 :
2432 : /* Generate and insert DIF into the whole received data block if DIF is enabled */
2433 0 : if (spdk_unlikely(pdu->dif_ctx != NULL) &&
2434 0 : spdk_dif_generate_stream(pdu->data_iov, pdu->data_iovcnt, 0, data_len,
2435 : pdu->dif_ctx) != 0) {
2436 0 : SPDK_ERRLOG("DIF generate failed\n");
2437 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
2438 0 : break;
2439 : }
2440 :
2441 : /* All of this PDU has now been read from the socket. */
2442 0 : nvmf_tcp_pdu_payload_handle(tqpair, pdu);
2443 0 : break;
2444 0 : case NVME_TCP_PDU_RECV_STATE_QUIESCING:
2445 0 : if (tqpair->tcp_pdu_working_count != 0) {
2446 0 : return NVME_TCP_PDU_IN_PROGRESS;
2447 : }
2448 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
2449 0 : break;
2450 0 : case NVME_TCP_PDU_RECV_STATE_ERROR:
2451 0 : if (spdk_sock_is_connected(tqpair->sock) && tqpair->wait_terminate) {
2452 0 : return NVME_TCP_PDU_IN_PROGRESS;
2453 : }
2454 0 : return NVME_TCP_PDU_FATAL;
2455 0 : default:
2456 0 : SPDK_ERRLOG("The state(%d) is invalid\n", tqpair->recv_state);
2457 0 : abort();
2458 : break;
2459 : }
2460 0 : } while (tqpair->recv_state != prev_state);
2461 :
2462 0 : return rc;
2463 : }
2464 :
2465 : static inline void *
2466 0 : nvmf_tcp_control_msg_get(struct spdk_nvmf_tcp_control_msg_list *list)
2467 : {
2468 : struct spdk_nvmf_tcp_control_msg *msg;
2469 :
2470 0 : assert(list);
2471 :
2472 0 : msg = STAILQ_FIRST(&list->free_msgs);
2473 0 : if (!msg) {
2474 0 : SPDK_DEBUGLOG(nvmf_tcp, "Out of control messages\n");
2475 0 : return NULL;
2476 : }
2477 0 : STAILQ_REMOVE_HEAD(&list->free_msgs, link);
2478 0 : return msg;
2479 : }
2480 :
2481 : static inline void
2482 0 : nvmf_tcp_control_msg_put(struct spdk_nvmf_tcp_control_msg_list *list, void *_msg)
2483 : {
2484 0 : struct spdk_nvmf_tcp_control_msg *msg = _msg;
2485 :
2486 0 : assert(list);
2487 0 : STAILQ_INSERT_HEAD(&list->free_msgs, msg, link);
2488 0 : }
2489 :
2490 : static int
2491 3 : nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_req *tcp_req,
2492 : struct spdk_nvmf_transport *transport,
2493 : struct spdk_nvmf_transport_poll_group *group)
2494 : {
2495 3 : struct spdk_nvmf_request *req = &tcp_req->req;
2496 : struct spdk_nvme_cmd *cmd;
2497 : struct spdk_nvme_sgl_descriptor *sgl;
2498 : struct spdk_nvmf_tcp_poll_group *tgroup;
2499 : enum spdk_nvme_tcp_term_req_fes fes;
2500 : struct nvme_tcp_pdu *pdu;
2501 : struct spdk_nvmf_tcp_qpair *tqpair;
2502 3 : uint32_t length, error_offset = 0;
2503 :
2504 3 : cmd = &req->cmd->nvme_cmd;
2505 3 : sgl = &cmd->dptr.sgl1;
2506 :
2507 3 : if (sgl->generic.type == SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK &&
2508 3 : sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_TRANSPORT) {
2509 : /* get request length from sgl */
2510 3 : length = sgl->unkeyed.length;
2511 3 : if (spdk_unlikely(length > transport->opts.max_io_size)) {
2512 1 : SPDK_ERRLOG("SGL length 0x%x exceeds max io size 0x%x\n",
2513 : length, transport->opts.max_io_size);
2514 1 : fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_LIMIT_EXCEEDED;
2515 1 : goto fatal_err;
2516 : }
2517 :
2518 : /* fill request length and populate iovs */
2519 2 : req->length = length;
2520 :
2521 2 : SPDK_DEBUGLOG(nvmf_tcp, "Data requested length= 0x%x\n", length);
2522 :
2523 2 : if (spdk_unlikely(req->dif_enabled)) {
2524 0 : req->dif.orig_length = length;
2525 0 : length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
2526 0 : req->dif.elba_length = length;
2527 : }
2528 :
2529 2 : if (nvmf_ctrlr_use_zcopy(req)) {
2530 0 : SPDK_DEBUGLOG(nvmf_tcp, "Using zero-copy to execute request %p\n", tcp_req);
2531 0 : req->data_from_pool = false;
2532 0 : return 0;
2533 : }
2534 :
2535 2 : if (spdk_nvmf_request_get_buffers(req, group, transport, length)) {
2536 : /* No available buffers. Queue this request up. */
2537 1 : SPDK_DEBUGLOG(nvmf_tcp, "No available large data buffers. Queueing request %p\n",
2538 : tcp_req);
2539 1 : return 0;
2540 : }
2541 :
2542 1 : SPDK_DEBUGLOG(nvmf_tcp, "Request %p took %d buffer/s from central pool, and data=%p\n",
2543 : tcp_req, req->iovcnt, req->iov[0].iov_base);
2544 :
2545 1 : return 0;
2546 0 : } else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
2547 0 : sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
2548 0 : uint64_t offset = sgl->address;
2549 0 : uint32_t max_len = transport->opts.in_capsule_data_size;
2550 :
2551 0 : assert(tcp_req->has_in_capsule_data);
2552 : /* A capsule cmd with in-capsule data takes its data length from the pdu header */
2553 0 : tqpair = tcp_req->pdu->qpair;
2554 : /* the pdu being received is not the same as the pdu stored in tcp_req */
2555 0 : pdu = tqpair->pdu_in_progress;
2556 0 : length = pdu->hdr.common.plen - pdu->psh_len - sizeof(struct spdk_nvme_tcp_common_pdu_hdr);
2557 0 : if (tqpair->host_ddgst_enable) {
2558 0 : length -= SPDK_NVME_TCP_DIGEST_LEN;
2559 : }
2560 : /* This error is not defined in the NVMe/TCP spec; treat it as fatal */
2561 0 : if (spdk_unlikely(length != sgl->unkeyed.length)) {
2562 0 : SPDK_ERRLOG("In-Capsule Data length 0x%x is not equal to SGL data length 0x%x\n",
2563 : length, sgl->unkeyed.length);
2564 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
2565 0 : error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen);
2566 0 : goto fatal_err;
2567 : }
2568 :
2569 0 : SPDK_DEBUGLOG(nvmf_tcp, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
2570 : offset, length);
2571 :
2572 : /* The NVMe/TCP transport does not use ICDOFF to control the in-capsule data offset. ICDOFF should be '0' */
2573 0 : if (spdk_unlikely(offset != 0)) {
2574 : /* Not defined as a fatal error in the NVMe/TCP spec, but handle it as one */
2575 0 : SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " should be ZERO in NVMe/TCP\n", offset);
2576 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER;
2577 0 : error_offset = offsetof(struct spdk_nvme_tcp_cmd, ccsqe.dptr.sgl1.address);
2578 0 : goto fatal_err;
2579 : }
2580 :
2581 0 : if (spdk_unlikely(length > max_len)) {
2582 : /* According to the SPEC we should support ICD up to 8192 bytes for admin and fabric commands */
2583 0 : if (length <= SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE &&
2584 0 : (cmd->opc == SPDK_NVME_OPC_FABRIC || req->qpair->qid == 0)) {
2585 :
2586 : /* Get a buffer from dedicated list */
2587 0 : SPDK_DEBUGLOG(nvmf_tcp, "Getting a buffer from control msg list\n");
2588 0 : tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
2589 0 : assert(tgroup->control_msg_list);
2590 0 : req->iov[0].iov_base = nvmf_tcp_control_msg_get(tgroup->control_msg_list);
2591 0 : if (!req->iov[0].iov_base) {
2592 : /* No available buffers. Queue this request up. */
2593 0 : SPDK_DEBUGLOG(nvmf_tcp, "No available ICD buffers. Queueing request %p\n", tcp_req);
2594 0 : return 0;
2595 : }
2596 : } else {
2597 0 : SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n",
2598 : length, max_len);
2599 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_LIMIT_EXCEEDED;
2600 0 : goto fatal_err;
2601 : }
2602 : } else {
2603 0 : req->iov[0].iov_base = tcp_req->buf;
2604 : }
2605 :
2606 0 : req->length = length;
2607 0 : req->data_from_pool = false;
2608 :
2609 0 : if (spdk_unlikely(req->dif_enabled)) {
2610 0 : length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
2611 0 : req->dif.elba_length = length;
2612 : }
2613 :
2614 0 : req->iov[0].iov_len = length;
2615 0 : req->iovcnt = 1;
2616 :
2617 0 : return 0;
2618 : }
2619 : /* This function runs before the data segment has been read, so the offending
2620 : * data segment cannot simply be skipped here; treat all SGL errors as fatal. */
2621 0 : SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type 0x%x, Subtype 0x%x\n",
2622 : sgl->generic.type, sgl->generic.subtype);
2623 0 : fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER;
2624 0 : error_offset = offsetof(struct spdk_nvme_tcp_cmd, ccsqe.dptr.sgl1.generic);
2625 1 : fatal_err:
2626 1 : nvmf_tcp_send_c2h_term_req(tcp_req->pdu->qpair, tcp_req->pdu, fes, error_offset);
2627 1 : return -1;
2628 : }
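/* SGL cases handled above: a transport data block SGL is served from the
 * shared buffer pool (or via zcopy), a data block SGL with the offset subtype
 * describes in-capsule data and must carry offset 0, and oversized
 * admin/fabric ICD (up to 8192 bytes) borrows a buffer from the poll group's
 * control message list. Anything else is treated as a fatal invalid-SGL
 * error.
 */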
2629 :
2630 : static inline enum spdk_nvme_media_error_status_code
2631 0 : nvmf_tcp_dif_error_to_compl_status(uint8_t err_type) {
2632 : enum spdk_nvme_media_error_status_code result;
2633 :
2634 0 : switch (err_type)
2635 : {
2636 0 : case SPDK_DIF_REFTAG_ERROR:
2637 0 : result = SPDK_NVME_SC_REFERENCE_TAG_CHECK_ERROR;
2638 0 : break;
2639 0 : case SPDK_DIF_APPTAG_ERROR:
2640 0 : result = SPDK_NVME_SC_APPLICATION_TAG_CHECK_ERROR;
2641 0 : break;
2642 0 : case SPDK_DIF_GUARD_ERROR:
2643 0 : result = SPDK_NVME_SC_GUARD_CHECK_ERROR;
2644 0 : break;
2645 0 : default:
2646 0 : SPDK_UNREACHABLE();
2647 : break;
2648 : }
2649 :
2650 0 : return result;
2651 : }
2652 :
2653 : static void
2654 4 : _nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
2655 : struct spdk_nvmf_tcp_req *tcp_req)
2656 : {
2657 4 : struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(
2658 : tqpair->qpair.transport, struct spdk_nvmf_tcp_transport, transport);
2659 : struct nvme_tcp_pdu *rsp_pdu;
2660 : struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
2661 : uint32_t plen, pdo, alignment;
2662 : int rc;
2663 :
2664 4 : SPDK_DEBUGLOG(nvmf_tcp, "enter\n");
2665 :
2666 4 : rsp_pdu = tcp_req->pdu;
2667 4 : assert(rsp_pdu != NULL);
2668 :
2669 4 : c2h_data = &rsp_pdu->hdr.c2h_data;
2670 4 : c2h_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
2671 4 : plen = c2h_data->common.hlen = sizeof(*c2h_data);
2672 :
2673 4 : if (tqpair->host_hdgst_enable) {
2674 0 : plen += SPDK_NVME_TCP_DIGEST_LEN;
2675 0 : c2h_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
2676 : }
2677 :
2678 : /* set the psh */
2679 4 : c2h_data->cccid = tcp_req->req.cmd->nvme_cmd.cid;
2680 4 : c2h_data->datal = tcp_req->req.length - tcp_req->pdu->rw_offset;
2681 4 : c2h_data->datao = tcp_req->pdu->rw_offset;
2682 :
2683 : /* set the padding */
2684 4 : rsp_pdu->padding_len = 0;
2685 4 : pdo = plen;
2686 4 : if (tqpair->cpda) {
2687 0 : alignment = (tqpair->cpda + 1) << 2;
2688 0 : if (plen % alignment != 0) {
2689 0 : pdo = (plen + alignment) / alignment * alignment;
2690 0 : rsp_pdu->padding_len = pdo - plen;
2691 0 : plen = pdo;
2692 : }
2693 : }
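/* Worked example of the alignment math above: assuming cpda = 3 and no
 * header digest, alignment = (3 + 1) << 2 = 16 bytes and plen = 24, so
 * pdo = (24 + 16) / 16 * 16 = 32 and padding_len = 8.
 */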
2694 :
2695 4 : c2h_data->common.pdo = pdo;
2696 4 : plen += c2h_data->datal;
2697 4 : if (tqpair->host_ddgst_enable) {
2698 0 : c2h_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF;
2699 0 : plen += SPDK_NVME_TCP_DIGEST_LEN;
2700 : }
2701 :
2702 4 : c2h_data->common.plen = plen;
2703 :
2704 4 : if (spdk_unlikely(tcp_req->req.dif_enabled)) {
2705 0 : rsp_pdu->dif_ctx = &tcp_req->req.dif.dif_ctx;
2706 : }
2707 :
2708 4 : nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->req.iov, tcp_req->req.iovcnt,
2709 : c2h_data->datao, c2h_data->datal);
2710 :
2711 :
2712 4 : c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
2713 : /* Need to send the capsule response if response is not all 0 */
2714 4 : if (ttransport->tcp_opts.c2h_success &&
2715 2 : tcp_req->rsp.cdw0 == 0 && tcp_req->rsp.cdw1 == 0) {
2716 1 : c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS;
2717 : }
2718 :
2719 4 : if (spdk_unlikely(tcp_req->req.dif_enabled)) {
2720 0 : struct spdk_nvme_cpl *rsp = &tcp_req->req.rsp->nvme_cpl;
2721 0 : struct spdk_dif_error err_blk = {};
2722 0 : uint32_t mapped_length = 0;
2723 0 : uint32_t available_iovs = SPDK_COUNTOF(rsp_pdu->iov);
2724 0 : uint32_t ddgst_len = 0;
2725 :
2726 0 : if (tqpair->host_ddgst_enable) {
2727 : /* Data digest consumes additional iov entry */
2728 0 : available_iovs--;
2729 : /* plen needs to be updated since nvme_tcp_build_iovs compares expected and actual plen */
2730 0 : ddgst_len = SPDK_NVME_TCP_DIGEST_LEN;
2731 0 : c2h_data->common.plen -= ddgst_len;
2732 : }
2733 : /* Temporary call to estimate whether the data can be described by a limited
2734 : * number of iovs; the iov vector will be rebuilt in nvmf_tcp_qpair_write_pdu */
2735 0 : nvme_tcp_build_iovs(rsp_pdu->iov, available_iovs, rsp_pdu, tqpair->host_hdgst_enable,
2736 : false, &mapped_length);
2737 :
2738 0 : if (mapped_length != c2h_data->common.plen) {
2739 0 : c2h_data->datal = mapped_length - (c2h_data->common.plen - c2h_data->datal);
2740 0 : SPDK_DEBUGLOG(nvmf_tcp,
2741 : "Part C2H, data_len %u (of %u), PDU len %u, updated PDU len %u, offset %u\n",
2742 : c2h_data->datal, tcp_req->req.length, c2h_data->common.plen, mapped_length, rsp_pdu->rw_offset);
2743 0 : c2h_data->common.plen = mapped_length;
2744 :
2745 : /* Rebuild pdu->data_iov since data length is changed */
2746 0 : nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->req.iov, tcp_req->req.iovcnt, c2h_data->datao,
2747 : c2h_data->datal);
2748 :
2749 0 : c2h_data->common.flags &= ~(SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU |
2750 : SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);
2751 : }
2752 :
2753 0 : c2h_data->common.plen += ddgst_len;
2754 :
2755 0 : assert(rsp_pdu->rw_offset <= tcp_req->req.length);
2756 :
2757 0 : rc = spdk_dif_verify_stream(rsp_pdu->data_iov, rsp_pdu->data_iovcnt,
2758 : 0, rsp_pdu->data_len, rsp_pdu->dif_ctx, &err_blk);
2759 0 : if (rc != 0) {
2760 0 : SPDK_ERRLOG("DIF error detected. type=%d, offset=%" PRIu32 "\n",
2761 : err_blk.err_type, err_blk.err_offset);
2762 0 : rsp->status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
2763 0 : rsp->status.sc = nvmf_tcp_dif_error_to_compl_status(err_blk.err_type);
2764 0 : nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair);
2765 0 : return;
2766 : }
2767 : }
2768 :
2769 4 : rsp_pdu->rw_offset += c2h_data->datal;
2770 4 : nvmf_tcp_qpair_write_req_pdu(tqpair, tcp_req, nvmf_tcp_pdu_c2h_data_complete, tcp_req);
2771 : }
2772 :
2773 : static void
2774 4 : nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
2775 : struct spdk_nvmf_tcp_req *tcp_req)
2776 : {
2777 4 : nvmf_tcp_req_pdu_init(tcp_req);
2778 4 : _nvmf_tcp_send_c2h_data(tqpair, tcp_req);
2779 4 : }
2780 :
2781 : static int
2782 1 : request_transfer_out(struct spdk_nvmf_request *req)
2783 : {
2784 : struct spdk_nvmf_tcp_req *tcp_req;
2785 : struct spdk_nvmf_qpair *qpair;
2786 : struct spdk_nvmf_tcp_qpair *tqpair;
2787 : struct spdk_nvme_cpl *rsp;
2788 :
2789 1 : SPDK_DEBUGLOG(nvmf_tcp, "enter\n");
2790 :
2791 1 : qpair = req->qpair;
2792 1 : rsp = &req->rsp->nvme_cpl;
2793 1 : tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req);
2794 :
2795 : /* Advance our sq_head pointer */
2796 1 : if (qpair->sq_head == qpair->sq_head_max) {
2797 1 : qpair->sq_head = 0;
2798 : } else {
2799 0 : qpair->sq_head++;
2800 : }
2801 1 : rsp->sqhd = qpair->sq_head;
2802 :
2803 1 : tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
2804 1 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
2805 1 : if (rsp->status.sc == SPDK_NVME_SC_SUCCESS && req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
2806 0 : nvmf_tcp_send_c2h_data(tqpair, tcp_req);
2807 : } else {
2808 1 : nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair);
2809 : }
2810 :
2811 1 : return 0;
2812 : }
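/* Ring arithmetic note: sq_head wraps back to 0 once it reaches sq_head_max,
 * i.e. it advances modulo (sq_head_max + 1), and the updated value is
 * reported to the host in every completion's sqhd field.
 */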
2813 :
2814 : static void
2815 4 : nvmf_tcp_check_fused_ordering(struct spdk_nvmf_tcp_transport *ttransport,
2816 : struct spdk_nvmf_tcp_qpair *tqpair,
2817 : struct spdk_nvmf_tcp_req *tcp_req)
2818 : {
2819 : enum spdk_nvme_cmd_fuse last, next;
2820 :
2821 4 : last = tqpair->fused_first ? tqpair->fused_first->cmd.fuse : SPDK_NVME_CMD_FUSE_NONE;
2822 4 : next = tcp_req->cmd.fuse;
2823 :
2824 4 : assert(last != SPDK_NVME_CMD_FUSE_SECOND);
2825 :
2826 4 : if (spdk_likely(last == SPDK_NVME_CMD_FUSE_NONE && next == SPDK_NVME_CMD_FUSE_NONE)) {
2827 4 : return;
2828 : }
2829 :
2830 0 : if (last == SPDK_NVME_CMD_FUSE_FIRST) {
2831 0 : if (next == SPDK_NVME_CMD_FUSE_SECOND) {
2832 : /* This is a valid pair of fused commands. Point them at each other
2833 : * so they can be submitted consecutively once ready to be executed.
2834 : */
2835 0 : tqpair->fused_first->fused_pair = tcp_req;
2836 0 : tcp_req->fused_pair = tqpair->fused_first;
2837 0 : tqpair->fused_first = NULL;
2838 0 : return;
2839 : } else {
2840 : /* Mark the last req as failed since it wasn't followed by a SECOND. */
2841 0 : tqpair->fused_first->fused_failed = true;
2842 :
2843 : /*
2844 : * If the last req is in READY_TO_EXECUTE state, then call
2845 : * nvmf_tcp_req_process(), otherwise nothing else will kick it.
2846 : */
2847 0 : if (tqpair->fused_first->state == TCP_REQUEST_STATE_READY_TO_EXECUTE) {
2848 0 : nvmf_tcp_req_process(ttransport, tqpair->fused_first);
2849 : }
2850 :
2851 0 : tqpair->fused_first = NULL;
2852 : }
2853 : }
2854 :
2855 0 : if (next == SPDK_NVME_CMD_FUSE_FIRST) {
2856 : /* Set tqpair->fused_first here so that we know to check that the next request
2857 : * is a SECOND (and to fail this one if it isn't).
2858 : */
2859 0 : tqpair->fused_first = tcp_req;
2860 0 : } else if (next == SPDK_NVME_CMD_FUSE_SECOND) {
2861 : /* Mark this req failed since it is a SECOND and the last one was not a FIRST. */
2862 0 : tcp_req->fused_failed = true;
2863 : }
2864 : }
2865 :
2866 : static bool
2867 4 : nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
2868 : struct spdk_nvmf_tcp_req *tcp_req)
2869 : {
2870 : struct spdk_nvmf_tcp_qpair *tqpair;
2871 : uint32_t plen;
2872 : struct nvme_tcp_pdu *pdu;
2873 : enum spdk_nvmf_tcp_req_state prev_state;
2874 4 : bool progress = false;
2875 4 : struct spdk_nvmf_transport *transport = &ttransport->transport;
2876 : struct spdk_nvmf_transport_poll_group *group;
2877 : struct spdk_nvmf_tcp_poll_group *tgroup;
2878 :
2879 4 : tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
2880 4 : group = &tqpair->group->group;
2881 4 : assert(tcp_req->state != TCP_REQUEST_STATE_FREE);
2882 :
2883 : /* If the qpair is not active, we need to abort the outstanding requests. */
2884 4 : if (!spdk_nvmf_qpair_is_active(&tqpair->qpair)) {
2885 0 : if (tcp_req->state == TCP_REQUEST_STATE_NEED_BUFFER) {
2886 0 : STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link);
2887 : }
2888 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED);
2889 : }
2890 :
2891 : /* The loop here is to allow for several back-to-back state changes. */
2892 : do {
2893 10 : prev_state = tcp_req->state;
2894 :
2895 10 : SPDK_DEBUGLOG(nvmf_tcp, "Request %p entering state %d on tqpair=%p\n", tcp_req, prev_state,
2896 : tqpair);
2897 :
2898 10 : switch (tcp_req->state) {
2899 0 : case TCP_REQUEST_STATE_FREE:
2900 : /* Some external code must kick a request into TCP_REQUEST_STATE_NEW
2901 : * to escape this state. */
2902 0 : break;
2903 4 : case TCP_REQUEST_STATE_NEW:
2904 4 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEW, tqpair->qpair.trace_id, 0, (uintptr_t)tcp_req,
2905 : tqpair->qpair.queue_depth);
2906 :
2907 : /* copy the cmd from the received pdu */
2908 4 : tcp_req->cmd = tqpair->pdu_in_progress->hdr.capsule_cmd.ccsqe;
2909 :
2910 4 : if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&tcp_req->req, &tcp_req->req.dif.dif_ctx))) {
2911 0 : tcp_req->req.dif_enabled = true;
2912 0 : tqpair->pdu_in_progress->dif_ctx = &tcp_req->req.dif.dif_ctx;
2913 : }
2914 :
2915 4 : nvmf_tcp_check_fused_ordering(ttransport, tqpair, tcp_req);
2916 :
2917 : /* The next state transition depends on the data transfer needs of this request. */
2918 4 : tcp_req->req.xfer = spdk_nvmf_req_get_xfer(&tcp_req->req);
2919 :
2920 4 : if (spdk_unlikely(tcp_req->req.xfer == SPDK_NVME_DATA_BIDIRECTIONAL)) {
2921 1 : nvmf_tcp_req_set_cpl(tcp_req, SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_OPCODE);
2922 1 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
2923 1 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
2924 1 : SPDK_DEBUGLOG(nvmf_tcp, "Request %p: invalid xfer type (BIDIRECTIONAL)\n", tcp_req);
2925 1 : break;
2926 : }
2927 :
2928 : /* If no data to transfer, ready to execute. */
2929 3 : if (tcp_req->req.xfer == SPDK_NVME_DATA_NONE) {
2930 : /* Reset the tqpair receiving pdu state */
2931 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
2932 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
2933 0 : break;
2934 : }
2935 :
2936 3 : pdu = tqpair->pdu_in_progress;
2937 3 : plen = pdu->hdr.common.hlen;
2938 3 : if (tqpair->host_hdgst_enable) {
2939 0 : plen += SPDK_NVME_TCP_DIGEST_LEN;
2940 : }
2941 3 : if (pdu->hdr.common.plen != plen) {
2942 3 : tcp_req->has_in_capsule_data = true;
2943 : } else {
2944 : /* Data is transmitted by C2H PDUs */
2945 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
2946 : }
2947 :
2948 3 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEED_BUFFER);
2949 3 : STAILQ_INSERT_TAIL(&group->pending_buf_queue, &tcp_req->req, buf_link);
2950 3 : break;
2951 3 : case TCP_REQUEST_STATE_NEED_BUFFER:
2952 3 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEED_BUFFER, tqpair->qpair.trace_id, 0,
2953 : (uintptr_t)tcp_req);
2954 :
2955 3 : assert(tcp_req->req.xfer != SPDK_NVME_DATA_NONE);
2956 :
2957 3 : if (!tcp_req->has_in_capsule_data && (&tcp_req->req != STAILQ_FIRST(&group->pending_buf_queue))) {
2958 0 : SPDK_DEBUGLOG(nvmf_tcp,
2959 : "Not the first element to wait for the buf for tcp_req(%p) on tqpair=%p\n",
2960 : tcp_req, tqpair);
2961 : /* This request needs to wait in line to obtain a buffer */
2962 0 : break;
2963 : }
2964 :
2965 : /* Try to get a data buffer */
2966 3 : if (nvmf_tcp_req_parse_sgl(tcp_req, transport, group) < 0) {
2967 1 : break;
2968 : }
2969 :
2970 : /* Get a zcopy buffer if the request can be serviced through zcopy */
2971 2 : if (spdk_nvmf_request_using_zcopy(&tcp_req->req)) {
2972 0 : if (spdk_unlikely(tcp_req->req.dif_enabled)) {
2973 0 : assert(tcp_req->req.dif.elba_length >= tcp_req->req.length);
2974 0 : tcp_req->req.length = tcp_req->req.dif.elba_length;
2975 : }
2976 :
2977 0 : STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link);
2978 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_ZCOPY_START);
2979 0 : spdk_nvmf_request_zcopy_start(&tcp_req->req);
2980 0 : break;
2981 : }
2982 :
2983 2 : if (tcp_req->req.iovcnt < 1) {
2984 1 : SPDK_DEBUGLOG(nvmf_tcp, "No buffer allocated for tcp_req(%p) on tqpair(%p\n)",
2985 : tcp_req, tqpair);
2986 : /* No buffers available. */
2987 1 : break;
2988 : }
2989 :
2990 1 : STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link);
2991 :
2992 : /* If the transfer is host to controller, we need to fetch the data from the host. */
2993 1 : if (tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
2994 1 : if (tcp_req->req.data_from_pool) {
2995 0 : SPDK_DEBUGLOG(nvmf_tcp, "Sending R2T for tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair);
2996 0 : nvmf_tcp_send_r2t_pdu(tqpair, tcp_req);
2997 : } else {
2998 : struct nvme_tcp_pdu *pdu;
2999 :
3000 1 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
3001 :
3002 1 : pdu = tqpair->pdu_in_progress;
3003 1 : SPDK_DEBUGLOG(nvmf_tcp, "No need to send R2T for tcp_req(%p) on tqpair=%p\n", tcp_req,
3004 : tqpair);
3005 : /* No R2T is needed; the data is contained in the capsule */
3006 1 : nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt,
3007 : 0, tcp_req->req.length);
3008 1 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
3009 : }
3010 1 : break;
3011 : }
3012 :
3013 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
3014 0 : break;
3015 0 : case TCP_REQUEST_STATE_AWAITING_ZCOPY_START:
3016 0 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_START, tqpair->qpair.trace_id, 0,
3017 : (uintptr_t)tcp_req);
3018 : /* Some external code must kick a request into TCP_REQUEST_STATE_ZCOPY_START_COMPLETED
3019 : * to escape this state. */
3020 0 : break;
3021 0 : case TCP_REQUEST_STATE_ZCOPY_START_COMPLETED:
3022 0 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_ZCOPY_START_COMPLETED, tqpair->qpair.trace_id, 0,
3023 : (uintptr_t)tcp_req);
3024 0 : if (spdk_unlikely(spdk_nvme_cpl_is_error(&tcp_req->req.rsp->nvme_cpl))) {
3025 0 : SPDK_DEBUGLOG(nvmf_tcp, "Zero-copy start failed for tcp_req(%p) on tqpair=%p\n",
3026 : tcp_req, tqpair);
3027 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
3028 0 : break;
3029 : }
3030 0 : if (tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
3031 0 : SPDK_DEBUGLOG(nvmf_tcp, "Sending R2T for tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair);
3032 0 : nvmf_tcp_send_r2t_pdu(tqpair, tcp_req);
3033 : } else {
3034 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTED);
3035 : }
3036 0 : break;
3037 0 : case TCP_REQUEST_STATE_AWAITING_R2T_ACK:
3038 0 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK, tqpair->qpair.trace_id, 0,
3039 : (uintptr_t)tcp_req);
3040 : /* The R2T completion or the h2c data incoming will kick it out of this state. */
3041 0 : break;
3042 1 : case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
3043 :
3044 1 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, tqpair->qpair.trace_id,
3045 : 0, (uintptr_t)tcp_req);
3046 : /* Some external code must kick a request into TCP_REQUEST_STATE_READY_TO_EXECUTE
3047 : * to escape this state. */
3048 1 : break;
3049 0 : case TCP_REQUEST_STATE_READY_TO_EXECUTE:
3050 0 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, tqpair->qpair.trace_id, 0,
3051 : (uintptr_t)tcp_req);
3052 :
3053 0 : if (spdk_unlikely(tcp_req->req.dif_enabled)) {
3054 0 : assert(tcp_req->req.dif.elba_length >= tcp_req->req.length);
3055 0 : tcp_req->req.length = tcp_req->req.dif.elba_length;
3056 : }
3057 :
3058 0 : if (tcp_req->cmd.fuse != SPDK_NVME_CMD_FUSE_NONE) {
3059 0 : if (tcp_req->fused_failed) {
3060 : /* This request failed FUSED semantics. Fail it immediately, without
3061 : * even sending it to the target layer.
3062 : */
3063 0 : nvmf_tcp_req_set_cpl(tcp_req, SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_ABORTED_MISSING_FUSED);
3064 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
3065 0 : break;
3066 : }
3067 :
3068 0 : if (tcp_req->fused_pair == NULL ||
3069 0 : tcp_req->fused_pair->state != TCP_REQUEST_STATE_READY_TO_EXECUTE) {
3070 : /* This request is ready to execute, but either we don't know yet if it's
3071 : * valid (i.e. this is a FIRST but we haven't received the next request yet),
3072 : * or the other request of this fused pair isn't ready to execute. So
3073 : * break here and this request will get processed later either when the
3074 : * other request is ready or we find that this request isn't valid.
3075 : */
3076 : break;
3077 : }
3078 : }
3079 :
3080 0 : if (!spdk_nvmf_request_using_zcopy(&tcp_req->req)) {
3081 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTING);
3082 : /* If we get to this point, and this request is a fused command, we know that
3083 : * it is part of a valid sequence (FIRST followed by a SECOND) and that both
3084 : * requests are READY_TO_EXECUTE. So call spdk_nvmf_request_exec() both on this
3085 : * request, and the other request of the fused pair, in the correct order.
3086 : * Also clear the ->fused_pair pointers on both requests, since after this point
3087 : * we no longer need to maintain the relationship between these two requests.
3088 : */
3089 0 : if (tcp_req->cmd.fuse == SPDK_NVME_CMD_FUSE_SECOND) {
3090 0 : assert(tcp_req->fused_pair != NULL);
3091 0 : assert(tcp_req->fused_pair->fused_pair == tcp_req);
3092 0 : nvmf_tcp_req_set_state(tcp_req->fused_pair, TCP_REQUEST_STATE_EXECUTING);
3093 0 : spdk_nvmf_request_exec(&tcp_req->fused_pair->req);
3094 0 : tcp_req->fused_pair->fused_pair = NULL;
3095 0 : tcp_req->fused_pair = NULL;
3096 : }
3097 0 : spdk_nvmf_request_exec(&tcp_req->req);
3098 0 : if (tcp_req->cmd.fuse == SPDK_NVME_CMD_FUSE_FIRST) {
3099 0 : assert(tcp_req->fused_pair != NULL);
3100 0 : assert(tcp_req->fused_pair->fused_pair == tcp_req);
3101 0 : nvmf_tcp_req_set_state(tcp_req->fused_pair, TCP_REQUEST_STATE_EXECUTING);
3102 0 : spdk_nvmf_request_exec(&tcp_req->fused_pair->req);
3103 0 : tcp_req->fused_pair->fused_pair = NULL;
3104 0 : tcp_req->fused_pair = NULL;
3105 : }
3106 : } else {
3107 : /* For zero-copy, only requests with data coming from host to the
3108 : * controller can end up here. */
3109 0 : assert(tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
3110 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT);
3111 0 : spdk_nvmf_request_zcopy_end(&tcp_req->req, true);
3112 : }
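     :
     : 	/* Concrete example of the ordering above: in an NVMe fused
     : 	 * compare-and-write, the Compare carries FUSE_FIRST and the Write
     : 	 * FUSE_SECOND. Once both are READY_TO_EXECUTE, the Compare is
     : 	 * submitted first, the Write immediately after, and both
     : 	 * ->fused_pair links are cleared. */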
3113 :
3114 0 : break;
3115 0 : case TCP_REQUEST_STATE_EXECUTING:
3116 0 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTING, tqpair->qpair.trace_id, 0, (uintptr_t)tcp_req);
3117 : /* Some external code must kick a request into TCP_REQUEST_STATE_EXECUTED
3118 : * to escape this state. */
3119 0 : break;
3120 0 : case TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT:
3121 0 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_COMMIT, tqpair->qpair.trace_id, 0,
3122 : (uintptr_t)tcp_req);
3123 : /* Some external code must kick a request into TCP_REQUEST_STATE_EXECUTED
3124 : * to escape this state. */
3125 0 : break;
3126 0 : case TCP_REQUEST_STATE_EXECUTED:
3127 0 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTED, tqpair->qpair.trace_id, 0, (uintptr_t)tcp_req);
3128 :
3129 0 : if (spdk_unlikely(tcp_req->req.dif_enabled)) {
3130 0 : tcp_req->req.length = tcp_req->req.dif.orig_length;
3131 : }
3132 :
3133 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
3134 0 : break;
3135 1 : case TCP_REQUEST_STATE_READY_TO_COMPLETE:
3136 1 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE, tqpair->qpair.trace_id, 0,
3137 : (uintptr_t)tcp_req);
3138 1 : if (request_transfer_out(&tcp_req->req) != 0) {
3139 0 : assert(0); /* No good way to handle this currently */
3140 : }
3141 1 : break;
3142 1 : case TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST:
3143 1 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, tqpair->qpair.trace_id,
3144 : 0, (uintptr_t)tcp_req);
3145 : /* Some external code must kick a request into TCP_REQUEST_STATE_COMPLETED
3146 : * to escape this state. */
3147 1 : break;
3148 0 : case TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE:
3149 0 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_RELEASE, tqpair->qpair.trace_id, 0,
3150 : (uintptr_t)tcp_req);
3151 : /* Some external code must kick a request into TCP_REQUEST_STATE_COMPLETED
3152 : * to escape this state. */
3153 0 : break;
3154 0 : case TCP_REQUEST_STATE_COMPLETED:
3155 0 : spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, tqpair->qpair.trace_id, 0, (uintptr_t)tcp_req,
3156 : tqpair->qpair.queue_depth);
3157 : /* If a PDU for this request is still outstanding to the host, the request is
3158 : * being completed because the qpair is being disconnected. We must delay the
3159 : * completion until that write finishes to avoid freeing the request twice. */
3160 0 : if (spdk_unlikely(tcp_req->pdu_in_use)) {
3161 0 : SPDK_DEBUGLOG(nvmf_tcp, "Delaying completion due to outstanding "
3162 : "write on req=%p\n", tcp_req);
3163 : /* This can only happen for zcopy requests */
3164 0 : assert(spdk_nvmf_request_using_zcopy(&tcp_req->req));
3165 0 : assert(!spdk_nvmf_qpair_is_active(&tqpair->qpair));
3166 0 : break;
3167 : }
3168 :
3169 0 : if (tcp_req->req.data_from_pool) {
3170 0 : spdk_nvmf_request_free_buffers(&tcp_req->req, group, transport);
3171 0 : } else if (spdk_unlikely(tcp_req->has_in_capsule_data &&
3172 : (tcp_req->cmd.opc == SPDK_NVME_OPC_FABRIC ||
3173 : tqpair->qpair.qid == 0) && tcp_req->req.length > transport->opts.in_capsule_data_size)) {
3174 0 : tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
3175 0 : assert(tgroup->control_msg_list);
3176 0 : SPDK_DEBUGLOG(nvmf_tcp, "Put buf to control msg list\n");
3177 0 : nvmf_tcp_control_msg_put(tgroup->control_msg_list,
3178 : tcp_req->req.iov[0].iov_base);
3179 0 : } else if (tcp_req->req.zcopy_bdev_io != NULL) {
3180 : /* If the request has an unreleased zcopy bdev_io, it's either a
3181 : * read, a failed write, or the qpair is being disconnected */
3182 0 : assert(spdk_nvmf_request_using_zcopy(&tcp_req->req));
3183 0 : assert(tcp_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST ||
3184 : spdk_nvme_cpl_is_error(&tcp_req->req.rsp->nvme_cpl) ||
3185 : !spdk_nvmf_qpair_is_active(&tqpair->qpair));
3186 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE);
3187 0 : spdk_nvmf_request_zcopy_end(&tcp_req->req, false);
3188 0 : break;
3189 : }
3190 0 : tcp_req->req.length = 0;
3191 0 : tcp_req->req.iovcnt = 0;
3192 0 : tcp_req->fused_failed = false;
3193 0 : if (tcp_req->fused_pair) {
3194 : /* This req was part of a valid fused pair, but failed before it got to
3195 : * READY_TO_EXECUTE state. This means we need to fail the other request
3196 : * in the pair, because it is no longer part of a valid pair. If the pair
3197 : * already reached READY_TO_EXECUTE state, we need to kick it.
3198 : */
3199 0 : tcp_req->fused_pair->fused_failed = true;
3200 0 : if (tcp_req->fused_pair->state == TCP_REQUEST_STATE_READY_TO_EXECUTE) {
3201 0 : nvmf_tcp_req_process(ttransport, tcp_req->fused_pair);
3202 : }
3203 0 : tcp_req->fused_pair = NULL;
3204 : }
3205 :
3206 0 : nvmf_tcp_req_put(tqpair, tcp_req);
3207 0 : break;
3208 0 : case TCP_REQUEST_NUM_STATES:
3209 : default:
3210 0 : assert(0);
3211 : break;
3212 : }
3213 :
3214 10 : if (tcp_req->state != prev_state) {
3215 6 : progress = true;
3216 : }
3217 10 : } while (tcp_req->state != prev_state);
3218 :
3219 4 : return progress;
3220 : }
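     :
     : /* Summary, derived from the case bodies above, of the wait states and the
     :  * events that advance them:
     :  *   AWAITING_ZCOPY_START: zcopy-start completion (-> ZCOPY_START_COMPLETED)
     :  *   AWAITING_R2T_ACK: R2T ack or incoming H2C data
     :  *   TRANSFERRING_HOST_TO_CONTROLLER: payload fully received (-> READY_TO_EXECUTE)
     :  *   EXECUTING / AWAITING_ZCOPY_COMMIT: bdev completion (-> EXECUTED)
     :  *   TRANSFERRING_CONTROLLER_TO_HOST / AWAITING_ZCOPY_RELEASE: (-> COMPLETED)
     :  * nvmf_tcp_req_complete() below performs the completion-driven kicks. */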
3221 :
3222 : static void
3223 0 : nvmf_tcp_sock_cb(void *arg, struct spdk_sock_group *group, struct spdk_sock *sock)
3224 : {
3225 0 : struct spdk_nvmf_tcp_qpair *tqpair = arg;
3226 : int rc;
3227 :
3228 0 : assert(tqpair != NULL);
3229 0 : rc = nvmf_tcp_sock_process(tqpair);
3230 :
3231 : /* If there was a new socket error, disconnect */
3232 0 : if (rc < 0) {
3233 0 : nvmf_tcp_qpair_disconnect(tqpair);
3234 : }
3235 0 : }
3236 :
3237 : static int
3238 0 : nvmf_tcp_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
3239 : struct spdk_nvmf_qpair *qpair)
3240 : {
3241 : struct spdk_nvmf_tcp_poll_group *tgroup;
3242 : struct spdk_nvmf_tcp_qpair *tqpair;
3243 : int rc;
3244 :
3245 0 : tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
3246 0 : tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
3247 :
3248 0 : rc = nvmf_tcp_qpair_sock_init(tqpair);
3249 0 : if (rc != 0) {
3250 0 : SPDK_ERRLOG("Cannot set sock opt for tqpair=%p\n", tqpair);
3251 0 : return -1;
3252 : }
3253 :
3254 0 : rc = nvmf_tcp_qpair_init(&tqpair->qpair);
3255 0 : if (rc < 0) {
3256 0 : SPDK_ERRLOG("Cannot init tqpair=%p\n", tqpair);
3257 0 : return -1;
3258 : }
3259 :
3260 0 : rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
3261 0 : if (rc < 0) {
3262 0 : SPDK_ERRLOG("Cannot init memory resource info for tqpair=%p\n", tqpair);
3263 0 : return -1;
3264 : }
3265 :
3266 0 : rc = spdk_sock_group_add_sock(tgroup->sock_group, tqpair->sock,
3267 : nvmf_tcp_sock_cb, tqpair);
3268 0 : if (rc != 0) {
3269 0 : SPDK_ERRLOG("Could not add sock to sock_group: %s (%d)\n",
3270 : spdk_strerror(errno), errno);
3271 0 : return -1;
3272 : }
3273 :
3274 0 : tqpair->group = tgroup;
3275 0 : nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_INVALID);
3276 0 : TAILQ_INSERT_TAIL(&tgroup->qpairs, tqpair, link);
3277 :
3278 0 : return 0;
3279 : }
3280 :
3281 : static int
3282 0 : nvmf_tcp_poll_group_remove(struct spdk_nvmf_transport_poll_group *group,
3283 : struct spdk_nvmf_qpair *qpair)
3284 : {
3285 : struct spdk_nvmf_tcp_poll_group *tgroup;
3286 : struct spdk_nvmf_tcp_qpair *tqpair;
3287 : int rc;
3288 :
3289 0 : tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
3290 0 : tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
3291 :
3292 0 : assert(tqpair->group == tgroup);
3293 :
3294 0 : SPDK_DEBUGLOG(nvmf_tcp, "remove tqpair=%p from the tgroup=%p\n", tqpair, tgroup);
3295 0 : if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) {
3296 : /* Change the state to move the qpair from the await_req list to the main list
3297 : * and to prevent nvmf_tcp_qpair_set_recv_state() from adding it back later */
3298 0 : nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_QUIESCING);
3299 : }
3300 0 : TAILQ_REMOVE(&tgroup->qpairs, tqpair, link);
3301 :
3302 : /* Try to force out any pending writes */
3303 0 : spdk_sock_flush(tqpair->sock);
3304 :
3305 0 : rc = spdk_sock_group_remove_sock(tgroup->sock_group, tqpair->sock);
3306 0 : if (rc != 0) {
3307 0 : SPDK_ERRLOG("Could not remove sock from sock_group: %s (%d)\n",
3308 : spdk_strerror(errno), errno);
3309 : }
3310 :
3311 0 : return rc;
3312 : }
3313 :
3314 : static int
3315 0 : nvmf_tcp_req_complete(struct spdk_nvmf_request *req)
3316 : {
3317 : struct spdk_nvmf_tcp_transport *ttransport;
3318 : struct spdk_nvmf_tcp_req *tcp_req;
3319 :
3320 0 : ttransport = SPDK_CONTAINEROF(req->qpair->transport, struct spdk_nvmf_tcp_transport, transport);
3321 0 : tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req);
3322 :
3323 0 : switch (tcp_req->state) {
3324 0 : case TCP_REQUEST_STATE_EXECUTING:
3325 : case TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT:
3326 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTED);
3327 0 : break;
3328 0 : case TCP_REQUEST_STATE_AWAITING_ZCOPY_START:
3329 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_ZCOPY_START_COMPLETED);
3330 0 : break;
3331 0 : case TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE:
3332 0 : nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED);
3333 0 : break;
3334 0 : default:
3335 0 : SPDK_ERRLOG("Unexpected request state %d\n", tcp_req->state);
3336 0 : assert(0 && "Unexpected request state");
3337 : break;
3338 : }
3339 :
3340 0 : nvmf_tcp_req_process(ttransport, tcp_req);
3341 :
3342 0 : return 0;
3343 : }
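     :
     : /* nvmf_tcp_req_complete() is installed as .req_complete in the transport ops
     :  * table at the bottom of this file; the generic nvmf layer invokes it when a
     :  * request completes via spdk_nvmf_request_complete(), which is what kicks
     :  * requests out of the EXECUTING and zero-copy wait states handled above. */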
3344 :
3345 : static void
3346 0 : nvmf_tcp_close_qpair(struct spdk_nvmf_qpair *qpair,
3347 : spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
3348 : {
3349 : struct spdk_nvmf_tcp_qpair *tqpair;
3350 :
3351 0 : SPDK_DEBUGLOG(nvmf_tcp, "Qpair: %p\n", qpair);
3352 :
3353 0 : tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
3354 :
3355 0 : assert(tqpair->fini_cb_fn == NULL);
3356 0 : tqpair->fini_cb_fn = cb_fn;
3357 0 : tqpair->fini_cb_arg = cb_arg;
3358 :
3359 0 : nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_EXITED);
3360 0 : nvmf_tcp_qpair_destroy(tqpair);
3361 0 : }
3362 :
3363 : static int
3364 0 : nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
3365 : {
3366 : struct spdk_nvmf_tcp_poll_group *tgroup;
3367 : int rc;
3368 : struct spdk_nvmf_request *req, *req_tmp;
3369 : struct spdk_nvmf_tcp_req *tcp_req;
3370 : struct spdk_nvmf_tcp_qpair *tqpair, *tqpair_tmp;
3371 0 : struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(group->transport,
3372 : struct spdk_nvmf_tcp_transport, transport);
3373 :
3374 0 : tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
3375 :
3376 0 : if (spdk_unlikely(TAILQ_EMPTY(&tgroup->qpairs) && TAILQ_EMPTY(&tgroup->await_req))) {
3377 0 : return 0;
3378 : }
3379 :
3380 0 : STAILQ_FOREACH_SAFE(req, &group->pending_buf_queue, buf_link, req_tmp) {
3381 0 : tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req);
3382 0 : if (nvmf_tcp_req_process(ttransport, tcp_req) == false) {
3383 0 : break;
3384 : }
3385 : }
3386 :
3387 0 : rc = spdk_sock_group_poll(tgroup->sock_group);
3388 0 : if (rc < 0) {
3389 0 : SPDK_ERRLOG("Failed to poll sock_group=%p\n", tgroup->sock_group);
3390 : }
3391 :
3392 0 : TAILQ_FOREACH_SAFE(tqpair, &tgroup->await_req, link, tqpair_tmp) {
3393 0 : rc = nvmf_tcp_sock_process(tqpair);
3394 :
3395 : /* If there was a new socket error, disconnect */
3396 0 : if (rc < 0) {
3397 0 : nvmf_tcp_qpair_disconnect(tqpair);
3398 : }
3399 : }
3400 :
3401 0 : return rc;
3402 : }
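     :
     : /* Minimal sketch of driving this callback from an SPDK poller. The helper is
     :  * hypothetical and for illustration only; in-tree, the generic nvmf layer
     :  * calls it through the .poll_group_poll member of the ops table below. */
     : static int
     : example_tcp_group_poller(void *arg)
     : {
     : 	struct spdk_nvmf_transport_poll_group *group = arg;
     : 	int rc = nvmf_tcp_poll_group_poll(group);
     :
     : 	/* rc > 0 means socket events were processed; <= 0 means idle or error. */
     : 	return rc > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
     : }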
3403 :
3404 : static int
3405 0 : nvmf_tcp_qpair_get_trid(struct spdk_nvmf_qpair *qpair,
3406 : struct spdk_nvme_transport_id *trid, bool peer)
3407 : {
3408 : struct spdk_nvmf_tcp_qpair *tqpair;
3409 : uint16_t port;
3410 :
3411 0 : tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
3412 0 : spdk_nvme_trid_populate_transport(trid, SPDK_NVME_TRANSPORT_TCP);
3413 :
3414 0 : if (peer) {
3415 0 : snprintf(trid->traddr, sizeof(trid->traddr), "%s", tqpair->initiator_addr);
3416 0 : port = tqpair->initiator_port;
3417 : } else {
3418 0 : snprintf(trid->traddr, sizeof(trid->traddr), "%s", tqpair->target_addr);
3419 0 : port = tqpair->target_port;
3420 : }
3421 :
3422 0 : if (spdk_sock_is_ipv4(tqpair->sock)) {
3423 0 : trid->adrfam = SPDK_NVMF_ADRFAM_IPV4;
3424 0 : } else if (spdk_sock_is_ipv6(tqpair->sock)) {
3425 0 : trid->adrfam = SPDK_NVMF_ADRFAM_IPV6;
3426 : } else {
3427 0 : return -1;
3428 : }
3429 :
3430 0 : snprintf(trid->trsvcid, sizeof(trid->trsvcid), "%d", port);
3431 0 : return 0;
3432 : }
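     :
     : /* Illustrative (hypothetical) helper showing the peer flag of
     :  * nvmf_tcp_qpair_get_trid(): 0 selects the target-side address, 1 the
     :  * initiator-side address, as the thin wrappers below rely on. */
     : static void
     : example_log_qpair_endpoints(struct spdk_nvmf_qpair *qpair)
     : {
     : 	struct spdk_nvme_transport_id local, peer;
     :
     : 	if (nvmf_tcp_qpair_get_trid(qpair, &local, 0) == 0 &&
     : 	    nvmf_tcp_qpair_get_trid(qpair, &peer, 1) == 0) {
     : 		SPDK_DEBUGLOG(nvmf_tcp, "local %s:%s peer %s:%s\n",
     : 			      local.traddr, local.trsvcid, peer.traddr, peer.trsvcid);
     : 	}
     : }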
3433 :
3434 : static int
3435 0 : nvmf_tcp_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
3436 : struct spdk_nvme_transport_id *trid)
3437 : {
3438 0 : return nvmf_tcp_qpair_get_trid(qpair, trid, 0);
3439 : }
3440 :
3441 : static int
3442 0 : nvmf_tcp_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
3443 : struct spdk_nvme_transport_id *trid)
3444 : {
3445 0 : return nvmf_tcp_qpair_get_trid(qpair, trid, 1);
3446 : }
3447 :
3448 : static int
3449 0 : nvmf_tcp_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
3450 : struct spdk_nvme_transport_id *trid)
3451 : {
3452 0 : return nvmf_tcp_qpair_get_trid(qpair, trid, 0);
3453 : }
3454 :
3455 : static void
3456 0 : nvmf_tcp_req_set_abort_status(struct spdk_nvmf_request *req,
3457 : struct spdk_nvmf_tcp_req *tcp_req_to_abort)
3458 : {
3459 0 : nvmf_tcp_req_set_cpl(tcp_req_to_abort, SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_ABORTED_BY_REQUEST);
3460 0 : nvmf_tcp_req_set_state(tcp_req_to_abort, TCP_REQUEST_STATE_READY_TO_COMPLETE);
3461 :
3462 0 : req->rsp->nvme_cpl.cdw0 &= ~1U; /* Command was successfully aborted. */
3463 0 : }
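     :
     : /* In the Abort completion, bit 0 of completion dword 0 cleared to 0 means the
     :  * command was aborted; set to 1 means it was not. Clearing the bit above is
     :  * therefore the success case, while the default path in
     :  * _nvmf_tcp_qpair_abort_request() below leaves it set. */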
3464 :
3465 : static int
3466 0 : _nvmf_tcp_qpair_abort_request(void *ctx)
3467 : {
3468 0 : struct spdk_nvmf_request *req = ctx;
3469 0 : struct spdk_nvmf_tcp_req *tcp_req_to_abort = SPDK_CONTAINEROF(req->req_to_abort,
3470 : struct spdk_nvmf_tcp_req, req);
3471 0 : struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(req->req_to_abort->qpair,
3472 : struct spdk_nvmf_tcp_qpair, qpair);
3473 0 : struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport,
3474 : struct spdk_nvmf_tcp_transport, transport);
3475 : int rc;
3476 :
3477 0 : spdk_poller_unregister(&req->poller);
3478 :
3479 0 : switch (tcp_req_to_abort->state) {
3480 0 : case TCP_REQUEST_STATE_EXECUTING:
3481 : case TCP_REQUEST_STATE_AWAITING_ZCOPY_START:
3482 : case TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT:
3483 0 : rc = nvmf_ctrlr_abort_request(req);
3484 0 : if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS) {
3485 0 : return SPDK_POLLER_BUSY;
3486 : }
3487 0 : break;
3488 :
3489 0 : case TCP_REQUEST_STATE_NEED_BUFFER:
3490 0 : STAILQ_REMOVE(&tqpair->group->group.pending_buf_queue,
3491 : &tcp_req_to_abort->req, spdk_nvmf_request, buf_link);
3492 :
3493 0 : nvmf_tcp_req_set_abort_status(req, tcp_req_to_abort);
3494 0 : nvmf_tcp_req_process(ttransport, tcp_req_to_abort);
3495 0 : break;
3496 :
3497 0 : case TCP_REQUEST_STATE_AWAITING_R2T_ACK:
3498 : case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
3499 0 : if (spdk_get_ticks() < req->timeout_tsc) {
3500 0 : req->poller = SPDK_POLLER_REGISTER(_nvmf_tcp_qpair_abort_request, req, 0);
3501 0 : return SPDK_POLLER_BUSY;
3502 : }
3503 0 : break;
3504 :
3505 0 : default:
3506 : /* Requests in other states are either un-abortable (e.g.
3507 : * TRANSFERRING_CONTROLLER_TO_HOST) or should never end up here, as they're
3508 : * immediately transitioned to other states in nvmf_tcp_req_process() (e.g.
3509 : * READY_TO_EXECUTE). But it is fine to end up here, as we'll simply complete the
3510 : * abort request with bit 0 of dword 0 set (command not aborted).
3511 : */
3512 0 : break;
3513 : }
3514 :
3515 0 : spdk_nvmf_request_complete(req);
3516 0 : return SPDK_POLLER_BUSY;
3517 : }
3518 :
3519 : static void
3520 0 : nvmf_tcp_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
3521 : struct spdk_nvmf_request *req)
3522 : {
3523 : struct spdk_nvmf_tcp_qpair *tqpair;
3524 : struct spdk_nvmf_tcp_transport *ttransport;
3525 : struct spdk_nvmf_transport *transport;
3526 : uint16_t cid;
3527 : uint32_t i;
3528 0 : struct spdk_nvmf_tcp_req *tcp_req_to_abort = NULL;
3529 :
3530 0 : tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
3531 0 : ttransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_tcp_transport, transport);
3532 0 : transport = &ttransport->transport;
3533 :
3534 0 : cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid;
3535 :
3536 0 : for (i = 0; i < tqpair->resource_count; i++) {
3537 0 : if (tqpair->reqs[i].state != TCP_REQUEST_STATE_FREE &&
3538 0 : tqpair->reqs[i].req.cmd->nvme_cmd.cid == cid) {
3539 0 : tcp_req_to_abort = &tqpair->reqs[i];
3540 0 : break;
3541 : }
3542 : }
3543 :
3544 0 : spdk_trace_record(TRACE_TCP_QP_ABORT_REQ, tqpair->qpair.trace_id, 0, (uintptr_t)req);
3545 :
3546 0 : if (tcp_req_to_abort == NULL) {
3547 0 : spdk_nvmf_request_complete(req);
3548 0 : return;
3549 : }
3550 :
3551 0 : req->req_to_abort = &tcp_req_to_abort->req;
3552 0 : req->timeout_tsc = spdk_get_ticks() +
3553 0 : transport->opts.abort_timeout_sec * spdk_get_ticks_hz();
3554 0 : req->poller = NULL;
3555 :
3556 0 : _nvmf_tcp_qpair_abort_request(req);
3557 : }
3558 :
3559 : struct tcp_subsystem_add_host_opts {
3560 : char *psk;
3561 : };
3562 :
3563 : static const struct spdk_json_object_decoder tcp_subsystem_add_host_opts_decoder[] = {
3564 : {"psk", offsetof(struct tcp_subsystem_add_host_opts, psk), spdk_json_decode_string, true},
3565 : };
3566 :
3567 : static int
3568 1 : tcp_load_psk(const char *fname, char *buf, size_t bufsz)
3569 : {
3570 : FILE *psk_file;
3571 1 : struct stat statbuf;
3572 : int rc;
3573 :
3574 1 : if (stat(fname, &statbuf) != 0) {
3575 0 : SPDK_ERRLOG("Could not read permissions for PSK file\n");
3576 0 : return -EACCES;
3577 : }
3578 :
3579 1 : if ((statbuf.st_mode & TCP_PSK_INVALID_PERMISSIONS) != 0) {
3580 0 : SPDK_ERRLOG("Incorrect permissions for PSK file\n");
3581 0 : return -EPERM;
3582 : }
3583 1 : if ((size_t)statbuf.st_size > bufsz) {
3584 0 : SPDK_ERRLOG("Invalid PSK: too long\n");
3585 0 : return -EINVAL;
3586 : }
3587 1 : psk_file = fopen(fname, "r");
3588 1 : if (psk_file == NULL) {
3589 0 : SPDK_ERRLOG("Could not open PSK file\n");
3590 0 : return -EINVAL;
3591 : }
3592 :
3593 1 : rc = fread(buf, 1, statbuf.st_size, psk_file);
3594 1 : if (rc != statbuf.st_size) {
3595 0 : SPDK_ERRLOG("Failed to read PSK\n");
3596 0 : fclose(psk_file);
3597 0 : return -EINVAL;
3598 : }
3599 :
3600 1 : fclose(psk_file);
3601 1 : return 0;
3602 : }
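     :
     : /* TCP_PSK_INVALID_PERMISSIONS (octal 0177) rejects any execute bit and any
     :  * group/other access bit, so the key file must be owner-only: for example,
     :  * mode 0600 passes (0600 & 0177 == 0) while 0644 fails (0644 & 0177 == 0044). */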
3603 :
3604 1 : SPDK_LOG_DEPRECATION_REGISTER(nvmf_tcp_psk_path, "PSK path", "v24.09", 0);
3605 :
3606 : static int
3607 1 : nvmf_tcp_subsystem_add_host(struct spdk_nvmf_transport *transport,
3608 : const struct spdk_nvmf_subsystem *subsystem,
3609 : const char *hostnqn,
3610 : const struct spdk_json_val *transport_specific)
3611 : {
3612 1 : struct tcp_subsystem_add_host_opts opts;
3613 : struct spdk_nvmf_tcp_transport *ttransport;
3614 1 : struct tcp_psk_entry *tmp, *entry = NULL;
3615 1 : uint8_t psk_configured[SPDK_TLS_PSK_MAX_LEN] = {};
3616 1 : char psk_interchange[SPDK_TLS_PSK_MAX_LEN + 1] = {};
3617 : uint8_t tls_cipher_suite;
3618 1 : int rc = 0;
3619 1 : uint8_t psk_retained_hash;
3620 1 : uint64_t psk_configured_size;
3621 :
3622 1 : if (transport_specific == NULL) {
3623 0 : return 0;
3624 : }
3625 :
3626 1 : assert(transport != NULL);
3627 1 : assert(subsystem != NULL);
3628 :
3629 1 : memset(&opts, 0, sizeof(opts));
3630 :
3631 : /* Decode PSK (either name of a key or file path) */
3632 1 : if (spdk_json_decode_object_relaxed(transport_specific, tcp_subsystem_add_host_opts_decoder,
3633 : SPDK_COUNTOF(tcp_subsystem_add_host_opts_decoder), &opts)) {
3634 0 : SPDK_ERRLOG("spdk_json_decode_object failed\n");
3635 0 : return -EINVAL;
3636 : }
3637 :
3638 1 : if (opts.psk == NULL) {
3639 0 : return 0;
3640 : }
3641 :
3642 1 : entry = calloc(1, sizeof(struct tcp_psk_entry));
3643 1 : if (entry == NULL) {
3644 0 : SPDK_ERRLOG("Unable to allocate memory for PSK entry!\n");
3645 0 : rc = -ENOMEM;
3646 0 : goto end;
3647 : }
3648 :
3649 1 : entry->key = spdk_keyring_get_key(opts.psk);
3650 1 : if (entry->key != NULL) {
3651 0 : rc = spdk_key_get_key(entry->key, psk_interchange, SPDK_TLS_PSK_MAX_LEN);
3652 0 : if (rc < 0) {
3653 0 : SPDK_ERRLOG("Failed to retreive PSK '%s'\n", opts.psk);
3654 0 : rc = -EINVAL;
3655 0 : goto end;
3656 : }
3657 : } else {
3658 1 : if (strlen(opts.psk) >= sizeof(entry->psk)) {
3659 0 : SPDK_ERRLOG("PSK path too long\n");
3660 0 : rc = -EINVAL;
3661 0 : goto end;
3662 : }
3663 :
3664 1 : rc = tcp_load_psk(opts.psk, psk_interchange, SPDK_TLS_PSK_MAX_LEN);
3665 1 : if (rc) {
3666 0 : SPDK_ERRLOG("Could not retrieve PSK from file\n");
3667 0 : goto end;
3668 : }
3669 :
3670 1 : SPDK_LOG_DEPRECATED(nvmf_tcp_psk_path);
3671 : }
3672 :
3673 : /* Parse the PSK interchange format to recover the configured PSK and its
3674 : * length. The length is then used to decide which cipher suite should be
3675 : * used to generate the PSK identity and TLS PSK later on. */
3676 1 : rc = nvme_tcp_parse_interchange_psk(psk_interchange, psk_configured, sizeof(psk_configured),
3677 : &psk_configured_size, &psk_retained_hash);
3678 1 : if (rc < 0) {
3679 0 : SPDK_ERRLOG("Failed to parse PSK interchange!\n");
3680 0 : goto end;
3681 : }
3682 :
3683 : /* The Base64 string encodes the configured PSK (32 or 48 bytes binary).
3684 : * This check also ensures that psk_configured_size is smaller than the
3685 : * psk_retained buffer size. */
3686 1 : if (psk_configured_size == SHA256_DIGEST_LENGTH) {
3687 1 : tls_cipher_suite = NVME_TCP_CIPHER_AES_128_GCM_SHA256;
3688 0 : } else if (psk_configured_size == SHA384_DIGEST_LENGTH) {
3689 0 : tls_cipher_suite = NVME_TCP_CIPHER_AES_256_GCM_SHA384;
3690 : } else {
3691 0 : SPDK_ERRLOG("Unrecognized cipher suite!\n");
3692 0 : rc = -EINVAL;
3693 0 : goto end;
3694 : }
3695 :
3696 1 : ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
3697 : /* Generate PSK identity. */
3698 1 : rc = nvme_tcp_generate_psk_identity(entry->pskid, sizeof(entry->pskid), hostnqn,
3699 1 : subsystem->subnqn, tls_cipher_suite);
3700 1 : if (rc) {
3701 0 : rc = -EINVAL;
3702 0 : goto end;
3703 : }
3704 : /* Check if PSK identity entry already exists. */
3705 1 : TAILQ_FOREACH(tmp, &ttransport->psks, link) {
3706 0 : if (strncmp(tmp->pskid, entry->pskid, NVMF_PSK_IDENTITY_LEN) == 0) {
3707 0 : SPDK_ERRLOG("Given PSK identity: %s entry already exists!\n", entry->pskid);
3708 0 : rc = -EEXIST;
3709 0 : goto end;
3710 : }
3711 : }
3712 :
3713 1 : if (snprintf(entry->hostnqn, sizeof(entry->hostnqn), "%s", hostnqn) < 0) {
3714 0 : SPDK_ERRLOG("Could not write hostnqn string!\n");
3715 0 : rc = -EINVAL;
3716 0 : goto end;
3717 : }
3718 1 : if (snprintf(entry->subnqn, sizeof(entry->subnqn), "%s", subsystem->subnqn) < 0) {
3719 0 : SPDK_ERRLOG("Could not write subnqn string!\n");
3720 0 : rc = -EINVAL;
3721 0 : goto end;
3722 : }
3723 :
3724 1 : entry->tls_cipher_suite = tls_cipher_suite;
3725 :
3726 : /* No hash indicates that Configured PSK must be used as Retained PSK. */
3727 1 : if (psk_retained_hash == NVME_TCP_HASH_ALGORITHM_NONE) {
3728 : /* The configured PSK is either 32 or 48 bytes long. */
3729 0 : memcpy(entry->psk, psk_configured, psk_configured_size);
3730 0 : entry->psk_size = psk_configured_size;
3731 : } else {
3732 : /* Derive retained PSK. */
3733 1 : rc = nvme_tcp_derive_retained_psk(psk_configured, psk_configured_size, hostnqn, entry->psk,
3734 : SPDK_TLS_PSK_MAX_LEN, psk_retained_hash);
3735 1 : if (rc < 0) {
3736 0 : SPDK_ERRLOG("Unable to derive retained PSK!\n");
3737 0 : goto end;
3738 : }
3739 1 : entry->psk_size = rc;
3740 : }
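     :
     : /* For example, an interchange key of the form "NVMeTLSkey-1:01:<base64>:"
     :  * decodes to a 32-byte configured PSK and selects AES_128_GCM_SHA256 above;
     :  * because the retained PSK is derived with the host NQN, the same configured
     :  * key yields a different retained PSK for each host. */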
3741 :
3742 1 : if (entry->key == NULL) {
3743 1 : rc = snprintf(entry->psk_path, sizeof(entry->psk_path), "%s", opts.psk);
3744 1 : if (rc < 0 || (size_t)rc >= sizeof(entry->psk_path)) {
3745 0 : SPDK_ERRLOG("Could not save PSK path!\n");
3746 0 : rc = -ENAMETOOLONG;
3747 0 : goto end;
3748 : }
3749 : }
3750 :
3751 1 : TAILQ_INSERT_TAIL(&ttransport->psks, entry, link);
3752 1 : rc = 0;
3753 :
3754 1 : end:
3755 1 : spdk_memset_s(psk_configured, sizeof(psk_configured), 0, sizeof(psk_configured));
3756 1 : spdk_memset_s(psk_interchange, sizeof(psk_interchange), 0, sizeof(psk_interchange));
3757 :
3758 1 : free(opts.psk);
3759 1 : if (rc != 0) {
3760 0 : nvmf_tcp_free_psk_entry(entry);
3761 : }
3762 :
3763 1 : return rc;
3764 : }
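     :
     : /* The transport_specific JSON decoded above is a single optional string; both
     :  * of the following forms (names illustrative) end up in opts.psk, and
     :  * spdk_keyring_get_key() determines which one was given:
     :  *   { "psk": "key0" }            - name of a key registered with the keyring
     :  *   { "psk": "/etc/spdk/psk0" }  - file path (deprecated, see
     :  *                                  nvmf_tcp_psk_path above) */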
3765 :
3766 : static void
3767 1 : nvmf_tcp_subsystem_remove_host(struct spdk_nvmf_transport *transport,
3768 : const struct spdk_nvmf_subsystem *subsystem,
3769 : const char *hostnqn)
3770 : {
3771 : struct spdk_nvmf_tcp_transport *ttransport;
3772 : struct tcp_psk_entry *entry, *tmp;
3773 :
3774 1 : assert(transport != NULL);
3775 1 : assert(subsystem != NULL);
3776 :
3777 1 : ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
3778 1 : TAILQ_FOREACH_SAFE(entry, &ttransport->psks, link, tmp) {
3779 1 : if ((strncmp(entry->hostnqn, hostnqn, SPDK_NVMF_NQN_MAX_LEN)) == 0 &&
3780 1 : (strncmp(entry->subnqn, subsystem->subnqn, SPDK_NVMF_NQN_MAX_LEN)) == 0) {
3781 1 : TAILQ_REMOVE(&ttransport->psks, entry, link);
3782 1 : nvmf_tcp_free_psk_entry(entry);
3783 1 : break;
3784 : }
3785 : }
3786 1 : }
3787 :
3788 : static void
3789 0 : nvmf_tcp_subsystem_dump_host(struct spdk_nvmf_transport *transport,
3790 : const struct spdk_nvmf_subsystem *subsystem, const char *hostnqn,
3791 : struct spdk_json_write_ctx *w)
3792 : {
3793 : struct spdk_nvmf_tcp_transport *ttransport;
3794 : struct tcp_psk_entry *entry;
3795 :
3796 0 : assert(transport != NULL);
3797 0 : assert(subsystem != NULL);
3798 :
3799 0 : ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
3800 0 : TAILQ_FOREACH(entry, &ttransport->psks, link) {
3801 0 : if ((strncmp(entry->hostnqn, hostnqn, SPDK_NVMF_NQN_MAX_LEN)) == 0 &&
3802 0 : (strncmp(entry->subnqn, subsystem->subnqn, SPDK_NVMF_NQN_MAX_LEN)) == 0) {
3803 0 : spdk_json_write_named_string(w, "psk", entry->key ?
3804 0 : spdk_key_get_name(entry->key) : entry->psk_path);
3805 0 : break;
3806 : }
3807 : }
3808 0 : }
3809 :
3810 : static void
3811 1 : nvmf_tcp_opts_init(struct spdk_nvmf_transport_opts *opts)
3812 : {
3813 1 : opts->max_queue_depth = SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH;
3814 1 : opts->max_qpairs_per_ctrlr = SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR;
3815 1 : opts->in_capsule_data_size = SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE;
3816 1 : opts->max_io_size = SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE;
3817 1 : opts->io_unit_size = SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE;
3818 1 : opts->max_aq_depth = SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH;
3819 1 : opts->num_shared_buffers = SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS;
3820 1 : opts->buf_cache_size = SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE;
3821 1 : opts->dif_insert_or_strip = SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP;
3822 1 : opts->abort_timeout_sec = SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC;
3823 1 : opts->transport_specific = NULL;
3824 1 : }
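     :
     : /* Hypothetical sketch of consuming these defaults from application code:
     :  * spdk_nvmf_transport_opts_init() dispatches to nvmf_tcp_opts_init() through
     :  * the ops table below; callers may then override individual fields before
     :  * creating the transport. Creation and error handling are elided. */
     : static void
     : example_override_tcp_defaults(void)
     : {
     : 	struct spdk_nvmf_transport_opts opts;
     :
     : 	if (!spdk_nvmf_transport_opts_init("TCP", &opts, sizeof(opts))) {
     : 		return;
     : 	}
     : 	/* Override selected SPDK_NVMF_TCP_DEFAULT_* values. */
     : 	opts.max_queue_depth = 256;
     : 	opts.in_capsule_data_size = 8192;
     : }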
3825 :
3826 : const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
3827 : .name = "TCP",
3828 : .type = SPDK_NVME_TRANSPORT_TCP,
3829 : .opts_init = nvmf_tcp_opts_init,
3830 : .create = nvmf_tcp_create,
3831 : .dump_opts = nvmf_tcp_dump_opts,
3832 : .destroy = nvmf_tcp_destroy,
3833 :
3834 : .listen = nvmf_tcp_listen,
3835 : .stop_listen = nvmf_tcp_stop_listen,
3836 :
3837 : .listener_discover = nvmf_tcp_discover,
3838 :
3839 : .poll_group_create = nvmf_tcp_poll_group_create,
3840 : .get_optimal_poll_group = nvmf_tcp_get_optimal_poll_group,
3841 : .poll_group_destroy = nvmf_tcp_poll_group_destroy,
3842 : .poll_group_add = nvmf_tcp_poll_group_add,
3843 : .poll_group_remove = nvmf_tcp_poll_group_remove,
3844 : .poll_group_poll = nvmf_tcp_poll_group_poll,
3845 :
3846 : .req_free = nvmf_tcp_req_free,
3847 : .req_complete = nvmf_tcp_req_complete,
3848 :
3849 : .qpair_fini = nvmf_tcp_close_qpair,
3850 : .qpair_get_local_trid = nvmf_tcp_qpair_get_local_trid,
3851 : .qpair_get_peer_trid = nvmf_tcp_qpair_get_peer_trid,
3852 : .qpair_get_listen_trid = nvmf_tcp_qpair_get_listen_trid,
3853 : .qpair_abort_request = nvmf_tcp_qpair_abort_request,
3854 : .subsystem_add_host = nvmf_tcp_subsystem_add_host,
3855 : .subsystem_remove_host = nvmf_tcp_subsystem_remove_host,
3856 : .subsystem_dump_host = nvmf_tcp_subsystem_dump_host,
3857 : };
3858 :
3859 1 : SPDK_NVMF_TRANSPORT_REGISTER(tcp, &spdk_nvmf_transport_tcp);
3860 1 : SPDK_LOG_REGISTER_COMPONENT(nvmf_tcp)