Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2018 Intel Corporation. All rights reserved.
3 : * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
4 : * Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #ifndef SPDK_INTERNAL_NVME_TCP_H
8 : #define SPDK_INTERNAL_NVME_TCP_H
9 :
10 : #include "spdk/likely.h"
11 : #include "spdk/sock.h"
12 : #include "spdk/dif.h"
13 : #include "spdk/hexlify.h"
14 : #include "spdk/nvmf_spec.h"
15 : #include "spdk/util.h"
16 : #include "spdk/base64.h"
17 :
18 : #include "sgl.h"
19 :
20 : #include "openssl/evp.h"
21 : #include "openssl/kdf.h"
22 : #include "openssl/sha.h"
23 :
24 : #define SPDK_CRC32C_XOR 0xffffffffUL
25 : #define SPDK_NVME_TCP_DIGEST_LEN 4
26 : #define SPDK_NVME_TCP_DIGEST_ALIGNMENT 4
27 : #define SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT 30
28 : #define SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR 8
29 : #define SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE 8192u
30 : /*
31 : * Maximum number of SGL elements.
32 : */
33 : #define NVME_TCP_MAX_SGL_DESCRIPTORS (16)
34 :
35 : #define MAKE_DIGEST_WORD(BUF, CRC32C) \
36 : ( ((*((uint8_t *)(BUF)+0)) = (uint8_t)((uint32_t)(CRC32C) >> 0)), \
37 : ((*((uint8_t *)(BUF)+1)) = (uint8_t)((uint32_t)(CRC32C) >> 8)), \
38 : ((*((uint8_t *)(BUF)+2)) = (uint8_t)((uint32_t)(CRC32C) >> 16)), \
39 : ((*((uint8_t *)(BUF)+3)) = (uint8_t)((uint32_t)(CRC32C) >> 24)))
40 :
41 : #define MATCH_DIGEST_WORD(BUF, CRC32C) \
42 : ( ((((uint32_t) *((uint8_t *)(BUF)+0)) << 0) \
43 : | (((uint32_t) *((uint8_t *)(BUF)+1)) << 8) \
44 : | (((uint32_t) *((uint8_t *)(BUF)+2)) << 16) \
45 : | (((uint32_t) *((uint8_t *)(BUF)+3)) << 24)) \
46 : == (CRC32C))
47 :
48 : #define DGET32(B) \
49 : ((( (uint32_t) *((uint8_t *)(B)+0)) << 0) \
50 : | (((uint32_t) *((uint8_t *)(B)+1)) << 8) \
51 : | (((uint32_t) *((uint8_t *)(B)+2)) << 16) \
52 : | (((uint32_t) *((uint8_t *)(B)+3)) << 24))
53 :
54 : #define DSET32(B,D) \
55 : (((*((uint8_t *)(B)+0)) = (uint8_t)((uint32_t)(D) >> 0)), \
56 : ((*((uint8_t *)(B)+1)) = (uint8_t)((uint32_t)(D) >> 8)), \
57 : ((*((uint8_t *)(B)+2)) = (uint8_t)((uint32_t)(D) >> 16)), \
58 : ((*((uint8_t *)(B)+3)) = (uint8_t)((uint32_t)(D) >> 24)))
59 :
60 : /* The PSK identity comprises of following components:
61 : * 4-character format specifier "NVMe" +
62 : * 1-character TLS protocol version indicator +
63 : * 1-character PSK type indicator, specifying the used PSK +
64 : * 2-characters hash specifier +
65 : * NQN of the host (SPDK_NVMF_NQN_MAX_LEN -> 223) +
66 : * NQN of the subsystem (SPDK_NVMF_NQN_MAX_LEN -> 223) +
67 : * 2 space character separators +
68 : * 1 null terminator =
69 : * 457 characters. */
70 : #define NVMF_PSK_IDENTITY_LEN (SPDK_NVMF_NQN_MAX_LEN + SPDK_NVMF_NQN_MAX_LEN + 11)
71 :
72 : /* The maximum size of hkdf_info is defined by RFC 8446, 514B (2 + 256 + 256). */
73 : #define NVME_TCP_HKDF_INFO_MAX_LEN 514
74 :
75 : #define PSK_ID_PREFIX "NVMe0R"
76 :
/* TLS cipher suites supported for NVMe/TCP PSK authentication. */
enum nvme_tcp_cipher_suite {
	NVME_TCP_CIPHER_AES_128_GCM_SHA256,	/* hash id "01" in the PSK identity */
	NVME_TCP_CIPHER_AES_256_GCM_SHA384,	/* hash id "02" in the PSK identity */
};
81 :
82 : typedef void (*nvme_tcp_qpair_xfer_complete_cb)(void *cb_arg);
83 :
/* Generic NVMe/TCP PDU container used by initiator and target code. Holds
 * the (possibly partially received) header, the SGL describing the payload,
 * and bookkeeping for digests, padding and asynchronous socket writes. */
struct nvme_tcp_pdu {
	union {
		/* to hold error pdu data */
		uint8_t raw[SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE];
		struct spdk_nvme_tcp_common_pdu_hdr common;
		struct spdk_nvme_tcp_ic_req ic_req;
		struct spdk_nvme_tcp_term_req_hdr term_req;
		struct spdk_nvme_tcp_cmd capsule_cmd;
		struct spdk_nvme_tcp_h2c_data_hdr h2c_data;
		struct spdk_nvme_tcp_ic_resp ic_resp;
		struct spdk_nvme_tcp_rsp capsule_resp;
		struct spdk_nvme_tcp_c2h_data_hdr c2h_data;
		struct spdk_nvme_tcp_r2t_hdr r2t;

	} hdr;

	bool has_hdgst;			/* a header digest follows the PSH (see nvme_tcp_pdu_calc_psh_len()) */
	bool ddgst_enable;		/* the payload is followed by a data digest */
	uint32_t data_digest_crc32;	/* NOTE(review): not referenced in this header — presumably filled by .c users; confirm */
	uint8_t data_digest[SPDK_NVME_TCP_DIGEST_LEN];	/* on-wire data digest bytes */

	uint8_t ch_valid_bytes;		/* bytes of the common header received so far */
	uint8_t psh_valid_bytes;	/* bytes of the PDU-specific header received so far */
	uint8_t psh_len;		/* PSH length incl. header digest and padding */

	nvme_tcp_qpair_xfer_complete_cb cb_fn;	/* completion callback for async PDU writes */
	void *cb_arg;

	/* The sock request ends with a 0 length iovec. Place the actual iovec immediately
	 * after it. There is a static assert below to check if the compiler inserted
	 * any unwanted padding */
	struct spdk_sock_request sock_req;
	struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS * 2];

	struct iovec data_iov[NVME_TCP_MAX_SGL_DESCRIPTORS];	/* payload SGL */
	uint32_t data_iovcnt;
	uint32_t data_len;		/* logical payload length (metadata excluded) */

	uint32_t rw_offset;		/* progress offset into the payload for reads/writes */
	TAILQ_ENTRY(nvme_tcp_pdu) tailq;
	uint32_t remaining;
	uint32_t padding_len;		/* pad bytes between PSH and data segment */

	struct spdk_dif_ctx *dif_ctx;	/* non-NULL when DIF insert/strip is active */

	void *req; /* data tied to a tcp request */
	void *qpair;
	SLIST_ENTRY(nvme_tcp_pdu) slist;
};
SPDK_STATIC_ASSERT(offsetof(struct nvme_tcp_pdu,
			    sock_req) + sizeof(struct spdk_sock_request) == offsetof(struct nvme_tcp_pdu, iov),
		   "Compiler inserted padding between iov and sock_req");
136 :
/* Receive-side state machine of a TCP qpair. A PDU arrives in stages:
 * common header (CH), PDU-specific header (PSH), then payload. */
enum nvme_tcp_pdu_recv_state {
	/* Ready to wait for PDU */
	NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY,

	/* Active tqpair waiting for any PDU common header */
	NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH,

	/* Active tqpair waiting for any PDU specific header */
	NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH,

	/* Active tqpair waiting for a tcp request, only use in target side */
	NVME_TCP_PDU_RECV_STATE_AWAIT_REQ,

	/* Active tqpair waiting for a free buffer to store PDU */
	NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_BUF,

	/* Active tqpair waiting for payload */
	NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD,

	/* Active tqpair waiting for all outstanding PDUs to complete */
	NVME_TCP_PDU_RECV_STATE_QUIESCING,

	/* Active tqpair does not wait for payload */
	NVME_TCP_PDU_RECV_STATE_ERROR,
};
162 :
/* Return codes shared by the receive/transfer helpers in this header;
 * negative values are fatal (see nvme_tcp_read_data()/nvme_tcp_readv_data()). */
enum nvme_tcp_error_codes {
	NVME_TCP_PDU_IN_PROGRESS = 0,
	NVME_TCP_CONNECTION_FATAL = -1,	/* connection closed or unrecoverable socket error */
	NVME_TCP_PDU_FATAL = -2,
};
168 :
/* Lifecycle states of a TCP qpair, from creation through fabric connect to
 * teardown. Values are assigned explicitly. */
enum nvme_tcp_qpair_state {
	NVME_TCP_QPAIR_STATE_INVALID = 0,
	NVME_TCP_QPAIR_STATE_INITIALIZING = 1,
	NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND = 2,
	NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL = 3,
	NVME_TCP_QPAIR_STATE_RUNNING = 4,
	NVME_TCP_QPAIR_STATE_EXITING = 5,
	NVME_TCP_QPAIR_STATE_EXITED = 6,
};
178 :
/* Per-PDU-type table: true when the PDU type may carry a header digest.
 * Actual digest use is additionally gated by the caller's hdgst_enable flag
 * (see nvme_tcp_build_iovs() and nvme_tcp_pdu_calc_psh_len()). */
static const bool g_nvme_tcp_hdgst[] = {
	[SPDK_NVME_TCP_PDU_TYPE_IC_REQ]		= false,
	[SPDK_NVME_TCP_PDU_TYPE_IC_RESP]	= false,
	[SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ]	= false,
	[SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ]	= false,
	[SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD]	= true,
	[SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP]	= true,
	[SPDK_NVME_TCP_PDU_TYPE_H2C_DATA]	= true,
	[SPDK_NVME_TCP_PDU_TYPE_C2H_DATA]	= true,
	[SPDK_NVME_TCP_PDU_TYPE_R2T]		= true
};
190 :
/* Per-PDU-type table: true when the PDU type may carry a data digest after
 * its payload. Only data-bearing PDU types qualify; actual use is gated by
 * the caller's ddgst_enable flag (see nvme_tcp_build_iovs()). */
static const bool g_nvme_tcp_ddgst[] = {
	[SPDK_NVME_TCP_PDU_TYPE_IC_REQ]		= false,
	[SPDK_NVME_TCP_PDU_TYPE_IC_RESP]	= false,
	[SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ]	= false,
	[SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ]	= false,
	[SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD]	= true,
	[SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP]	= false,
	[SPDK_NVME_TCP_PDU_TYPE_H2C_DATA]	= true,
	[SPDK_NVME_TCP_PDU_TYPE_C2H_DATA]	= true,
	[SPDK_NVME_TCP_PDU_TYPE_R2T]		= false
};
202 :
203 : static uint32_t
204 4 : nvme_tcp_pdu_calc_header_digest(struct nvme_tcp_pdu *pdu)
205 : {
206 : uint32_t crc32c;
207 4 : uint32_t hlen = pdu->hdr.common.hlen;
208 :
209 4 : crc32c = spdk_crc32c_update(&pdu->hdr.raw, hlen, ~0);
210 4 : crc32c = crc32c ^ SPDK_CRC32C_XOR;
211 4 : return crc32c;
212 : }
213 :
214 : static uint32_t
215 3 : nvme_tcp_pdu_calc_data_digest(struct nvme_tcp_pdu *pdu)
216 : {
217 3 : uint32_t crc32c = SPDK_CRC32C_XOR;
218 : uint32_t mod;
219 :
220 3 : assert(pdu->data_len != 0);
221 :
222 3 : if (spdk_likely(!pdu->dif_ctx)) {
223 3 : crc32c = spdk_crc32c_iov_update(pdu->data_iov, pdu->data_iovcnt, crc32c);
224 : } else {
225 0 : spdk_dif_update_crc32c_stream(pdu->data_iov, pdu->data_iovcnt,
226 0 : 0, pdu->data_len, &crc32c, pdu->dif_ctx);
227 : }
228 :
229 3 : mod = pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT;
230 3 : if (mod != 0) {
231 0 : uint32_t pad_length = SPDK_NVME_TCP_DIGEST_ALIGNMENT - mod;
232 0 : uint8_t pad[3] = {0, 0, 0};
233 :
234 0 : assert(pad_length > 0);
235 0 : assert(pad_length <= sizeof(pad));
236 0 : crc32c = spdk_crc32c_update(pad, pad_length, crc32c);
237 : }
238 3 : return crc32c;
239 : }
240 :
241 : static inline void
242 52 : _nvme_tcp_sgl_get_buf(struct spdk_iov_sgl *s, void **_buf, uint32_t *_buf_len)
243 : {
244 52 : if (_buf != NULL) {
245 52 : *_buf = (uint8_t *)s->iov->iov_base + s->iov_offset;
246 : }
247 52 : if (_buf_len != NULL) {
248 52 : *_buf_len = s->iov->iov_len - s->iov_offset;
249 : }
250 52 : }
251 :
/* Append every entry of 'iov' to SGL 's'. Returns false as soon as an
 * append fails (SGL out of space), true when all entries were added. */
static inline bool
_nvme_tcp_sgl_append_multi(struct spdk_iov_sgl *s, struct iovec *iov, int iovcnt)
{
	int idx = 0;

	while (idx < iovcnt) {
		if (!spdk_iov_sgl_append(s, iov[idx].iov_base, iov[idx].iov_len)) {
			return false;
		}
		idx++;
	}

	return true;
}
265 :
/* Sum the lengths of all entries in an iovec array. */
static inline uint32_t
_get_iov_array_size(struct iovec *iov, int iovcnt)
{
	uint32_t total = 0;
	int idx;

	for (idx = 0; idx < iovcnt; idx++) {
		total += iov[idx].iov_len;
	}

	return total;
}
278 :
/* Append iovs that require DIF metadata interleaving to SGL 's'.
 *
 * If the SGL's running offset already lies past this chunk
 * (s->iov_offset >= data_len) nothing is mapped and the offset is reduced
 * so the next chunk sees the remainder; otherwise the data+metadata layout
 * is mapped into the SGL's remaining iov slots.
 *
 * Returns false when the DIF mapping fails or the SGL runs out of iov
 * entries, true otherwise.
 *
 * NOTE(review): the skip path decrements by the iov array size (which
 * includes metadata) while the comparison uses data_len — presumably
 * iov_offset counts md-inclusive buffer bytes here; confirm with callers.
 */
static inline bool
_nvme_tcp_sgl_append_multi_with_md(struct spdk_iov_sgl *s, struct iovec *iov, int iovcnt,
				   uint32_t data_len, const struct spdk_dif_ctx *dif_ctx)
{
	int rc;
	uint32_t mapped_len = 0;

	if (s->iov_offset >= data_len) {
		s->iov_offset -= _get_iov_array_size(iov, iovcnt);
	} else {
		rc = spdk_dif_set_md_interleave_iovs(s->iov, s->iovcnt, iov, iovcnt,
						     s->iov_offset, data_len - s->iov_offset,
						     &mapped_len, dif_ctx);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to setup iovs for DIF insert/strip.\n");
			return false;
		}

		s->total_size += mapped_len;
		s->iov_offset = 0;
		/* rc is the number of SGL entries the mapping consumed. */
		assert(s->iovcnt >= rc);
		s->iovcnt -= rc;
		s->iov += rc;

		if (s->iovcnt == 0) {
			return false;
		}
	}

	return true;
}
310 :
/* Build an iovec array describing the complete on-wire form of 'pdu' for
 * transmission: header (+ optional header digest), padding, payload and
 * optional data digest. Digest segments are included only when both the
 * PDU type allows them and the corresponding *_enable flag is set.
 *
 * 'plen' is accumulated solely to assert that it matches the plen field
 * announced in the PDU common header.
 *
 * Returns the number of entries of 'iov' consumed; the total number of
 * mapped bytes is stored in *_mapped_length when non-NULL.
 */
static int
nvme_tcp_build_iovs(struct iovec *iov, int iovcnt, struct nvme_tcp_pdu *pdu,
		    bool hdgst_enable, bool ddgst_enable, uint32_t *_mapped_length)
{
	uint32_t hlen;
	uint32_t plen __attribute__((unused));
	struct spdk_iov_sgl sgl;

	if (iovcnt == 0) {
		return 0;
	}

	spdk_iov_sgl_init(&sgl, iov, iovcnt, 0);
	hlen = pdu->hdr.common.hlen;

	/* Header Digest */
	if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && hdgst_enable) {
		hlen += SPDK_NVME_TCP_DIGEST_LEN;
	}

	plen = hlen;
	if (!pdu->data_len) {
		/* PDU header + possible header digest */
		spdk_iov_sgl_append(&sgl, (uint8_t *)&pdu->hdr.raw, hlen);
		goto end;
	}

	/* Padding: pad bytes live in hdr.raw directly after the PSH/digest. */
	if (pdu->padding_len > 0) {
		hlen += pdu->padding_len;
		plen = hlen;
	}

	if (!spdk_iov_sgl_append(&sgl, (uint8_t *)&pdu->hdr.raw, hlen)) {
		goto end;
	}

	/* Data Segment */
	plen += pdu->data_len;
	if (spdk_likely(!pdu->dif_ctx)) {
		if (!_nvme_tcp_sgl_append_multi(&sgl, pdu->data_iov, pdu->data_iovcnt)) {
			goto end;
		}
	} else {
		if (!_nvme_tcp_sgl_append_multi_with_md(&sgl, pdu->data_iov, pdu->data_iovcnt,
							pdu->data_len, pdu->dif_ctx)) {
			goto end;
		}
	}

	/* Data Digest */
	if (g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] && ddgst_enable) {
		plen += SPDK_NVME_TCP_DIGEST_LEN;
		spdk_iov_sgl_append(&sgl, pdu->data_digest, SPDK_NVME_TCP_DIGEST_LEN);
	}

	assert(plen == pdu->hdr.common.plen);

end:
	if (_mapped_length != NULL) {
		*_mapped_length = sgl.total_size;
	}

	return iovcnt - sgl.iovcnt;
}
376 :
/* Build an iovec array covering only the payload (and, when ddgst_enable is
 * set, the trailing data digest) of 'pdu', starting pdu->rw_offset bytes
 * into the payload. Unlike nvme_tcp_build_iovs(), the data digest here is
 * gated only by ddgst_enable, not by the PDU type table.
 *
 * Returns the number of entries of 'iov' consumed; the total number of
 * mapped bytes is stored in *_mapped_length when non-NULL.
 */
static int
nvme_tcp_build_payload_iovs(struct iovec *iov, int iovcnt, struct nvme_tcp_pdu *pdu,
			    bool ddgst_enable, uint32_t *_mapped_length)
{
	struct spdk_iov_sgl sgl;

	if (iovcnt == 0) {
		return 0;
	}

	spdk_iov_sgl_init(&sgl, iov, iovcnt, pdu->rw_offset);

	if (spdk_likely(!pdu->dif_ctx)) {
		if (!_nvme_tcp_sgl_append_multi(&sgl, pdu->data_iov, pdu->data_iovcnt)) {
			goto end;
		}
	} else {
		if (!_nvme_tcp_sgl_append_multi_with_md(&sgl, pdu->data_iov, pdu->data_iovcnt,
							pdu->data_len, pdu->dif_ctx)) {
			goto end;
		}
	}

	/* Data Digest */
	if (ddgst_enable) {
		spdk_iov_sgl_append(&sgl, pdu->data_digest, SPDK_NVME_TCP_DIGEST_LEN);
	}

end:
	if (_mapped_length != NULL) {
		*_mapped_length = sgl.total_size;
	}
	return iovcnt - sgl.iovcnt;
}
411 :
412 : static int
413 4 : nvme_tcp_read_data(struct spdk_sock *sock, int bytes,
414 : void *buf)
415 : {
416 : int ret;
417 :
418 4 : ret = spdk_sock_recv(sock, buf, bytes);
419 :
420 4 : if (ret > 0) {
421 4 : return ret;
422 : }
423 :
424 0 : if (ret < 0) {
425 0 : if (errno == EAGAIN || errno == EWOULDBLOCK) {
426 0 : return 0;
427 : }
428 :
429 : /* For connect reset issue, do not output error log */
430 0 : if (errno != ECONNRESET) {
431 0 : SPDK_ERRLOG("spdk_sock_recv() failed, errno %d: %s\n",
432 : errno, spdk_strerror(errno));
433 : }
434 : }
435 :
436 : /* connection closed */
437 0 : return NVME_TCP_CONNECTION_FATAL;
438 : }
439 :
440 : static int
441 0 : nvme_tcp_readv_data(struct spdk_sock *sock, struct iovec *iov, int iovcnt)
442 : {
443 : int ret;
444 :
445 0 : assert(sock != NULL);
446 0 : if (iov == NULL || iovcnt == 0) {
447 0 : return 0;
448 : }
449 :
450 0 : if (iovcnt == 1) {
451 0 : return nvme_tcp_read_data(sock, iov->iov_len, iov->iov_base);
452 : }
453 :
454 0 : ret = spdk_sock_readv(sock, iov, iovcnt);
455 :
456 0 : if (ret > 0) {
457 0 : return ret;
458 : }
459 :
460 0 : if (ret < 0) {
461 0 : if (errno == EAGAIN || errno == EWOULDBLOCK) {
462 0 : return 0;
463 : }
464 :
465 : /* For connect reset issue, do not output error log */
466 0 : if (errno != ECONNRESET) {
467 0 : SPDK_ERRLOG("spdk_sock_readv() failed, errno %d: %s\n",
468 : errno, spdk_strerror(errno));
469 : }
470 : }
471 :
472 : /* connection closed */
473 0 : return NVME_TCP_CONNECTION_FATAL;
474 : }
475 :
476 :
477 : static int
478 0 : nvme_tcp_read_payload_data(struct spdk_sock *sock, struct nvme_tcp_pdu *pdu)
479 : {
480 0 : struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS + 1];
481 : int iovcnt;
482 :
483 0 : iovcnt = nvme_tcp_build_payload_iovs(iov, NVME_TCP_MAX_SGL_DESCRIPTORS + 1, pdu,
484 0 : pdu->ddgst_enable, NULL);
485 0 : assert(iovcnt >= 0);
486 :
487 0 : return nvme_tcp_readv_data(sock, iov, iovcnt);
488 : }
489 :
490 : static void
491 36 : _nvme_tcp_pdu_set_data(struct nvme_tcp_pdu *pdu, void *data, uint32_t data_len)
492 : {
493 36 : pdu->data_iov[0].iov_base = data;
494 36 : pdu->data_iov[0].iov_len = data_len;
495 36 : pdu->data_iovcnt = 1;
496 36 : }
497 :
498 : static void
499 30 : nvme_tcp_pdu_set_data(struct nvme_tcp_pdu *pdu, void *data, uint32_t data_len)
500 : {
501 30 : _nvme_tcp_pdu_set_data(pdu, data, data_len);
502 30 : pdu->data_len = data_len;
503 30 : }
504 :
/* Point pdu->data_iov at the sub-range [data_offset, data_offset + data_len)
 * of the caller's buffer described by (iov, iovcnt).
 *
 * Without DIF the logical range maps 1:1 onto the buffer. With DIF, the
 * logical data range is first translated to the corresponding (larger)
 * buffer range that includes interleaved metadata.
 */
static void
nvme_tcp_pdu_set_data_buf(struct nvme_tcp_pdu *pdu,
			  struct iovec *iov, int iovcnt,
			  uint32_t data_offset, uint32_t data_len)
{
	uint32_t buf_offset, buf_len, remain_len, len;
	uint8_t *buf;
	struct spdk_iov_sgl pdu_sgl, buf_sgl;

	pdu->data_len = data_len;

	if (spdk_likely(!pdu->dif_ctx)) {
		buf_offset = data_offset;
		buf_len = data_len;
	} else {
		/* Translate the logical range into buffer coordinates that
		 * account for interleaved DIF metadata. */
		spdk_dif_ctx_set_data_offset(pdu->dif_ctx, data_offset);
		spdk_dif_get_range_with_md(data_offset, data_len,
					   &buf_offset, &buf_len, pdu->dif_ctx);
	}

	if (iovcnt == 1) {
		/* Single source buffer: reference it directly at the offset. */
		_nvme_tcp_pdu_set_data(pdu, (void *)((uint64_t)iov[0].iov_base + buf_offset), buf_len);
	} else {
		/* Walk the source SGL and reference each piece of the
		 * requested range from pdu->data_iov. */
		spdk_iov_sgl_init(&pdu_sgl, pdu->data_iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0);
		spdk_iov_sgl_init(&buf_sgl, iov, iovcnt, 0);

		spdk_iov_sgl_advance(&buf_sgl, buf_offset);
		remain_len = buf_len;

		while (remain_len > 0) {
			_nvme_tcp_sgl_get_buf(&buf_sgl, (void *)&buf, &len);
			len = spdk_min(len, remain_len);

			spdk_iov_sgl_advance(&buf_sgl, len);
			remain_len -= len;

			if (!spdk_iov_sgl_append(&pdu_sgl, buf, len)) {
				/* Out of data_iov slots; the asserts below will fire. */
				break;
			}
		}

		assert(remain_len == 0);
		assert(pdu_sgl.total_size == buf_len);

		pdu->data_iovcnt = NVME_TCP_MAX_SGL_DESCRIPTORS - pdu_sgl.iovcnt;
	}
}
552 :
/* Compute pdu->psh_len for a freshly received common header: the PDU
 * specific header length is hlen, plus the header digest when enabled for
 * this PDU type, plus any pad bytes up to PDO for data-bearing PDUs, minus
 * the common header itself. Also sets pdu->has_hdgst. */
static void
nvme_tcp_pdu_calc_psh_len(struct nvme_tcp_pdu *pdu, bool hdgst_enable)
{
	uint8_t psh_len, pdo, padding_len;

	psh_len = pdu->hdr.common.hlen;

	if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && hdgst_enable) {
		pdu->has_hdgst = true;
		psh_len += SPDK_NVME_TCP_DIGEST_LEN;
	}
	if (pdu->hdr.common.plen > psh_len) {
		switch (pdu->hdr.common.pdu_type) {
		case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD:
		case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA:
		case SPDK_NVME_TCP_PDU_TYPE_C2H_DATA:
			pdo = pdu->hdr.common.pdo;
			/* NOTE(review): pdo comes from the peer; if pdo < psh_len
			 * the uint8_t subtraction wraps and psh_len is *reduced*
			 * to pdo below — presumably pdo is validated by the
			 * caller; confirm. */
			padding_len = pdo - psh_len;
			if (padding_len > 0) {
				psh_len = pdo;
			}
			break;
		default:
			/* There is no padding for other PDU types */
			break;
		}
	}

	psh_len -= sizeof(struct spdk_nvme_tcp_common_pdu_hdr);
	pdu->psh_len = psh_len;
}
584 :
585 : static inline int
586 4 : nvme_tcp_generate_psk_identity(char *out_id, size_t out_id_len, const char *hostnqn,
587 : const char *subnqn, enum nvme_tcp_cipher_suite tls_cipher_suite)
588 : {
589 : int rc;
590 :
591 4 : assert(out_id != NULL);
592 :
593 4 : if (out_id_len < strlen(PSK_ID_PREFIX) + strlen(hostnqn) + strlen(subnqn) + 5) {
594 1 : SPDK_ERRLOG("Out buffer too small!\n");
595 1 : return -1;
596 : }
597 :
598 3 : if (tls_cipher_suite == NVME_TCP_CIPHER_AES_128_GCM_SHA256) {
599 2 : rc = snprintf(out_id, out_id_len, "%s%s %s %s", PSK_ID_PREFIX, "01",
600 : hostnqn, subnqn);
601 1 : } else if (tls_cipher_suite == NVME_TCP_CIPHER_AES_256_GCM_SHA384) {
602 0 : rc = snprintf(out_id, out_id_len, "%s%s %s %s", PSK_ID_PREFIX, "02",
603 : hostnqn, subnqn);
604 : } else {
605 1 : SPDK_ERRLOG("Unknown cipher suite requested!\n");
606 1 : return -EOPNOTSUPP;
607 : }
608 :
609 2 : if (rc < 0) {
610 0 : SPDK_ERRLOG("Could not generate PSK identity\n");
611 0 : return -1;
612 : }
613 :
614 2 : return 0;
615 : }
616 :
/* Hash algorithms for PSK derivation (see nvme_tcp_derive_retained_psk()
 * and nvme_tcp_parse_interchange_psk()). */
enum nvme_tcp_hash_algorithm {
	NVME_TCP_HASH_ALGORITHM_NONE,
	NVME_TCP_HASH_ALGORITHM_SHA256,
	NVME_TCP_HASH_ALGORITHM_SHA384,
};
622 :
/* Derive the retained PSK from the configured PSK via HKDF (OpenSSL
 * EVP_PKEY HKDF), keyed with the host NQN. The info blob is laid out like
 * a TLS 1.3 HkdfLabel: a 2-byte big-endian size (here the input PSK size),
 * a length-prefixed label "tls13 HostNQN", and the length-prefixed host NQN.
 *
 * Returns the derived key length (digest size of the selected hash) on
 * success, or a negative errno on failure. 'psk_out' must hold at least
 * that many bytes. */
static inline int
nvme_tcp_derive_retained_psk(const uint8_t *psk_in, uint64_t psk_in_size, const char *hostnqn,
			     uint8_t *psk_out, uint64_t psk_out_len, enum nvme_tcp_hash_algorithm psk_retained_hash)
{
	EVP_PKEY_CTX *ctx;
	uint64_t digest_len;
	uint8_t hkdf_info[NVME_TCP_HKDF_INFO_MAX_LEN] = {};
	const char *label = "tls13 HostNQN";
	size_t pos, labellen, nqnlen;
	const EVP_MD *hash;
	int rc, hkdf_info_size;

	labellen = strlen(label);
	nqnlen = strlen(hostnqn);
	assert(nqnlen <= SPDK_NVMF_NQN_MAX_LEN);

	/* Build the HKDF info blob: 2-byte big-endian input PSK size, then
	 * length-prefixed label and NQN. */
	*(uint16_t *)&hkdf_info[0] = htons(psk_in_size);
	pos = sizeof(uint16_t);
	hkdf_info[pos] = (uint8_t)labellen;
	pos += sizeof(uint8_t);
	memcpy(&hkdf_info[pos], label, labellen);
	pos += labellen;
	hkdf_info[pos] = (uint8_t)nqnlen;
	pos += sizeof(uint8_t);
	memcpy(&hkdf_info[pos], hostnqn, nqnlen);
	pos += nqnlen;
	hkdf_info_size = pos;

	switch (psk_retained_hash) {
	case NVME_TCP_HASH_ALGORITHM_SHA256:
		digest_len = SHA256_DIGEST_LENGTH;
		hash = EVP_sha256();
		break;
	case NVME_TCP_HASH_ALGORITHM_SHA384:
		digest_len = SHA384_DIGEST_LENGTH;
		hash = EVP_sha384();
		break;
	default:
		SPDK_ERRLOG("Unknown PSK hash requested!\n");
		return -EOPNOTSUPP;
	}

	if (digest_len > psk_out_len) {
		SPDK_ERRLOG("Insufficient buffer size for out key!\n");
		return -EINVAL;
	}

	ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_HKDF, NULL);
	if (!ctx) {
		SPDK_ERRLOG("Unable to initialize EVP_PKEY_CTX!\n");
		return -ENOMEM;
	}

	/* EVP_PKEY_* functions returns 1 as a success code and 0 or negative on failure. */
	if (EVP_PKEY_derive_init(ctx) != 1) {
		SPDK_ERRLOG("Unable to initialize key derivation ctx for HKDF!\n");
		rc = -ENOMEM;
		goto end;
	}
	if (EVP_PKEY_CTX_set_hkdf_md(ctx, hash) != 1) {
		SPDK_ERRLOG("Unable to set hash for HKDF!\n");
		rc = -EOPNOTSUPP;
		goto end;
	}
	if (EVP_PKEY_CTX_set1_hkdf_key(ctx, psk_in, psk_in_size) != 1) {
		SPDK_ERRLOG("Unable to set PSK key for HKDF!\n");
		rc = -ENOBUFS;
		goto end;
	}

	if (EVP_PKEY_CTX_add1_hkdf_info(ctx, hkdf_info, hkdf_info_size) != 1) {
		SPDK_ERRLOG("Unable to set info label for HKDF!\n");
		rc = -ENOBUFS;
		goto end;
	}
	if (EVP_PKEY_CTX_set1_hkdf_salt(ctx, NULL, 0) != 1) {
		SPDK_ERRLOG("Unable to set salt for HKDF!\n");
		rc = -EINVAL;
		goto end;
	}
	/* On success digest_len is updated with the number of bytes written. */
	if (EVP_PKEY_derive(ctx, psk_out, &digest_len) != 1) {
		SPDK_ERRLOG("Unable to derive the PSK key!\n");
		rc = -EINVAL;
		goto end;
	}

	rc = digest_len;

end:
	EVP_PKEY_CTX_free(ctx);
	return rc;
}
715 :
/* Derive the final TLS PSK from the retained PSK via HKDF, bound to the PSK
 * identity string. The info blob mirrors a TLS 1.3 HkdfLabel: 2-byte
 * big-endian input PSK size, length-prefixed label "tls13 nvme-tls-psk",
 * and the length-prefixed PSK identity.
 *
 * Returns the derived key length (digest size of the suite's hash) on
 * success, or a negative value on failure. 'psk_out' must hold at least
 * that many bytes. */
static inline int
nvme_tcp_derive_tls_psk(const uint8_t *psk_in, uint64_t psk_in_size, const char *psk_identity,
			uint8_t *psk_out, uint64_t psk_out_size, enum nvme_tcp_cipher_suite tls_cipher_suite)
{
	EVP_PKEY_CTX *ctx;
	uint64_t digest_len = 0;
	char hkdf_info[NVME_TCP_HKDF_INFO_MAX_LEN] = {};
	const char *label = "tls13 nvme-tls-psk";
	size_t pos, labellen, idlen;
	const EVP_MD *hash;
	int rc, hkdf_info_size;

	/* The cipher suite determines both the hash and the output length. */
	if (tls_cipher_suite == NVME_TCP_CIPHER_AES_128_GCM_SHA256) {
		digest_len = SHA256_DIGEST_LENGTH;
		hash = EVP_sha256();
	} else if (tls_cipher_suite == NVME_TCP_CIPHER_AES_256_GCM_SHA384) {
		digest_len = SHA384_DIGEST_LENGTH;
		hash = EVP_sha384();
	} else {
		SPDK_ERRLOG("Unknown cipher suite requested!\n");
		return -EOPNOTSUPP;
	}

	labellen = strlen(label);
	idlen = strlen(psk_identity);
	/* The identity length must fit in the 1-byte length prefix below. */
	if (idlen > UINT8_MAX) {
		SPDK_ERRLOG("Invalid PSK ID: too long\n");
		return -1;
	}

	/* Build the HKDF info blob: 2-byte big-endian input PSK size, then
	 * length-prefixed label and PSK identity. */
	*(uint16_t *)&hkdf_info[0] = htons(psk_in_size);
	pos = sizeof(uint16_t);
	hkdf_info[pos] = (uint8_t)labellen;
	pos += sizeof(uint8_t);
	memcpy(&hkdf_info[pos], label, labellen);
	pos += labellen;
	hkdf_info[pos] = (uint8_t)idlen;
	pos += sizeof(uint8_t);
	memcpy(&hkdf_info[pos], psk_identity, idlen);
	pos += idlen;
	hkdf_info_size = pos;

	if (digest_len > psk_out_size) {
		SPDK_ERRLOG("Insufficient buffer size for out key!\n");
		return -1;
	}

	ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_HKDF, NULL);
	if (!ctx) {
		SPDK_ERRLOG("Unable to initialize EVP_PKEY_CTX!\n");
		return -1;
	}

	/* EVP_PKEY_* calls return 1 on success, 0 or negative on failure. */
	if (EVP_PKEY_derive_init(ctx) != 1) {
		SPDK_ERRLOG("Unable to initialize key derivation ctx for HKDF!\n");
		rc = -ENOMEM;
		goto end;
	}
	if (EVP_PKEY_CTX_set_hkdf_md(ctx, hash) != 1) {
		SPDK_ERRLOG("Unable to set hash method for HKDF!\n");
		rc = -EOPNOTSUPP;
		goto end;
	}
	if (EVP_PKEY_CTX_set1_hkdf_key(ctx, psk_in, psk_in_size) != 1) {
		SPDK_ERRLOG("Unable to set PSK key for HKDF!\n");
		rc = -ENOBUFS;
		goto end;
	}
	if (EVP_PKEY_CTX_add1_hkdf_info(ctx, hkdf_info, hkdf_info_size) != 1) {
		SPDK_ERRLOG("Unable to set info label for HKDF!\n");
		rc = -ENOBUFS;
		goto end;
	}
	if (EVP_PKEY_CTX_set1_hkdf_salt(ctx, NULL, 0) != 1) {
		SPDK_ERRLOG("Unable to set salt for HKDF!\n");
		rc = -EINVAL;
		goto end;
	}
	/* On success digest_len is updated with the number of bytes written. */
	if (EVP_PKEY_derive(ctx, psk_out, &digest_len) != 1) {
		SPDK_ERRLOG("Unable to derive the PSK key!\n");
		rc = -EINVAL;
		goto end;
	}

	rc = digest_len;

end:
	EVP_PKEY_CTX_free(ctx);
	return rc;
}
806 :
807 : static inline int
808 1 : nvme_tcp_parse_interchange_psk(const char *psk_in, uint8_t *psk_out, size_t psk_out_size,
809 : uint64_t *psk_out_decoded_size, uint8_t *hash)
810 : {
811 1 : const char *delim = ":";
812 1 : char psk_cpy[SPDK_TLS_PSK_MAX_LEN] = {};
813 1 : uint8_t psk_base64_decoded[SPDK_TLS_PSK_MAX_LEN] = {};
814 1 : uint64_t psk_configured_size = 0;
815 : uint32_t crc32_calc, crc32;
816 : char *psk_base64;
817 1 : uint64_t psk_base64_decoded_size = 0;
818 : int rc;
819 :
820 : /* Verify PSK format. */
821 1 : if (sscanf(psk_in, "NVMeTLSkey-1:%02hhx:", hash) != 1 || psk_in[strlen(psk_in) - 1] != delim[0]) {
822 0 : SPDK_ERRLOG("Invalid format of PSK interchange!\n");
823 0 : return -EINVAL;
824 : }
825 :
826 1 : if (strlen(psk_in) >= SPDK_TLS_PSK_MAX_LEN) {
827 0 : SPDK_ERRLOG("PSK interchange exceeds maximum %d characters!\n", SPDK_TLS_PSK_MAX_LEN);
828 0 : return -EINVAL;
829 : }
830 1 : if (*hash != NVME_TCP_HASH_ALGORITHM_NONE && *hash != NVME_TCP_HASH_ALGORITHM_SHA256 &&
831 0 : *hash != NVME_TCP_HASH_ALGORITHM_SHA384) {
832 0 : SPDK_ERRLOG("Invalid PSK length!\n");
833 0 : return -EINVAL;
834 : }
835 :
836 : /* Check provided hash function string. */
837 1 : memcpy(psk_cpy, psk_in, strlen(psk_in));
838 1 : strtok(psk_cpy, delim);
839 1 : strtok(NULL, delim);
840 :
841 1 : psk_base64 = strtok(NULL, delim);
842 1 : if (psk_base64 == NULL) {
843 0 : SPDK_ERRLOG("Could not get base64 string from PSK interchange!\n");
844 0 : return -EINVAL;
845 : }
846 :
847 1 : rc = spdk_base64_decode(psk_base64_decoded, &psk_base64_decoded_size, psk_base64);
848 1 : if (rc) {
849 0 : SPDK_ERRLOG("Could not decode base64 PSK!\n");
850 0 : return -EINVAL;
851 : }
852 :
853 1 : switch (*hash) {
854 1 : case NVME_TCP_HASH_ALGORITHM_SHA256:
855 1 : psk_configured_size = SHA256_DIGEST_LENGTH;
856 1 : break;
857 0 : case NVME_TCP_HASH_ALGORITHM_SHA384:
858 0 : psk_configured_size = SHA384_DIGEST_LENGTH;
859 0 : break;
860 0 : case NVME_TCP_HASH_ALGORITHM_NONE:
861 0 : if (psk_base64_decoded_size == SHA256_DIGEST_LENGTH + SPDK_CRC32_SIZE_BYTES) {
862 0 : psk_configured_size = SHA256_DIGEST_LENGTH;
863 0 : } else if (psk_base64_decoded_size == SHA384_DIGEST_LENGTH + SPDK_CRC32_SIZE_BYTES) {
864 0 : psk_configured_size = SHA384_DIGEST_LENGTH;
865 : }
866 0 : break;
867 0 : default:
868 0 : SPDK_ERRLOG("Invalid key: unsupported key hash\n");
869 0 : assert(0);
870 : return -EINVAL;
871 : }
872 1 : if (psk_base64_decoded_size != psk_configured_size + SPDK_CRC32_SIZE_BYTES) {
873 0 : SPDK_ERRLOG("Invalid key: unsupported key length\n");
874 0 : return -EINVAL;
875 : }
876 :
877 1 : crc32 = from_le32(&psk_base64_decoded[psk_configured_size]);
878 :
879 1 : crc32_calc = spdk_crc32_ieee_update(psk_base64_decoded, psk_configured_size, ~0);
880 1 : crc32_calc = ~crc32_calc;
881 :
882 1 : if (crc32 != crc32_calc) {
883 0 : SPDK_ERRLOG("CRC-32 checksums do not match!\n");
884 0 : return -EINVAL;
885 : }
886 :
887 1 : if (psk_configured_size > psk_out_size) {
888 0 : SPDK_ERRLOG("Insufficient buffer size: %lu for configured PSK of size: %lu!\n",
889 : psk_out_size, psk_configured_size);
890 0 : return -ENOBUFS;
891 : }
892 1 : memcpy(psk_out, psk_base64_decoded, psk_configured_size);
893 1 : *psk_out_decoded_size = psk_configured_size;
894 :
895 1 : return rc;
896 : }
897 :
898 : #endif /* SPDK_INTERNAL_NVME_TCP_H */
|