Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2022 Intel Corporation.
3 : * Copyright 2023 Solidigm All Rights Reserved
4 : * All rights reserved.
5 : */
6 :
7 :
8 : #include "spdk/bdev.h"
9 : #include "spdk/bdev_module.h"
10 : #include "spdk/ftl.h"
11 : #include "spdk/string.h"
12 :
13 : #include "ftl_nv_cache.h"
14 : #include "ftl_nv_cache_io.h"
15 : #include "ftl_core.h"
16 : #include "ftl_band.h"
17 : #include "utils/ftl_addr_utils.h"
18 : #include "mngt/ftl_mngt.h"
19 :
20 : static inline uint64_t nvc_data_blocks(struct ftl_nv_cache *nv_cache) __attribute__((unused));
21 : static struct ftl_nv_cache_compactor *compactor_alloc(struct spdk_ftl_dev *dev);
22 : static void compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor);
23 : static void compaction_process_ftl_done(struct ftl_rq *rq);
24 : static void compaction_process_read_entry(void *arg);
25 : static void ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev,
26 : const struct ftl_property *property,
27 : struct spdk_json_write_ctx *w);
28 :
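 : /* Sanity-check that the chunk_md entry lies fully within the NVC metadata buffer */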
29 : static inline void
30 0 : nvc_validate_md(struct ftl_nv_cache *nv_cache,
31 : struct ftl_nv_cache_chunk_md *chunk_md)
32 : {
33 0 : struct ftl_md *md = nv_cache->md;
34 0 : void *buffer = ftl_md_get_buffer(md);
35 0 : uint64_t size = ftl_md_get_buffer_size(md);
36 0 : void *ptr = chunk_md;
37 :
38 0 : if (ptr < buffer) {
39 0 : ftl_abort();
40 : }
41 :
42 0 : ptr += sizeof(*chunk_md);
43 0 : if (ptr > buffer + size) {
44 0 : ftl_abort();
45 : }
46 0 : }
47 :
48 : static inline uint64_t
49 0 : nvc_data_offset(struct ftl_nv_cache *nv_cache)
50 : {
51 0 : return 0;
52 : }
53 :
54 : static inline uint64_t
55 0 : nvc_data_blocks(struct ftl_nv_cache *nv_cache)
56 : {
57 0 : return nv_cache->chunk_blocks * nv_cache->chunk_count;
58 : }
59 :
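 : /*
 :  * The tail metadata (P2L map) of a chunk stores one L2P address per chunk data block,
 :  * so its size in blocks is ceil(chunk_data_blocks * addr_size / FTL_BLOCK_SIZE).
 :  * For illustration only (actual values depend on the device layout): with a
 :  * hypothetical 1,048,576-block chunk, an 8-byte address size and a 4096-byte FTL
 :  * block, the tail metadata occupies 1,048,576 * 8 / 4096 = 2048 blocks.
 :  */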
60 : size_t
61 0 : ftl_nv_cache_chunk_tail_md_num_blocks(const struct ftl_nv_cache *nv_cache)
62 : {
63 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache,
64 : struct spdk_ftl_dev, nv_cache);
65 0 : return spdk_divide_round_up(dev->layout.nvc.chunk_data_blocks * dev->layout.l2p.addr_size,
66 : FTL_BLOCK_SIZE);
67 : }
68 :
69 : static size_t
70 0 : nv_cache_p2l_map_pool_elem_size(const struct ftl_nv_cache *nv_cache)
71 : {
72 : /* Map pool element holds the whole tail md */
73 0 : return nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE;
74 : }
75 :
76 : static uint64_t
77 0 : get_chunk_idx(struct ftl_nv_cache_chunk *chunk)
78 : {
79 0 : struct ftl_nv_cache_chunk *first_chunk = chunk->nv_cache->chunks;
80 :
81 0 : return (chunk->offset - first_chunk->offset) / chunk->nv_cache->chunk_blocks;
82 : }
83 :
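 : /*
 :  * Derives the compaction trigger and the free-chunk target from the configured
 :  * percentages. For illustration only: with a hypothetical 100 usable chunks,
 :  * chunk_compaction_threshold of 80 and chunk_free_target of 5, compaction starts
 :  * once 80 chunks are full and the throttle aims to keep ceil(100 * 5 / 100) = 5
 :  * chunks free.
 :  */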
84 : static void
85 0 : ftl_nv_cache_init_update_limits(struct spdk_ftl_dev *dev)
86 : {
87 0 : struct ftl_nv_cache *nvc = &dev->nv_cache;
88 0 : uint64_t usable_chunks = nvc->chunk_count - nvc->chunk_inactive_count;
89 :
 90 : /* Start compaction when the number of full chunks exceeds the given percentage of active chunks */
91 0 : nvc->chunk_compaction_threshold = usable_chunks *
92 0 : dev->conf.nv_cache.chunk_compaction_threshold /
93 : 100;
94 :
95 0 : nvc->throttle.interval_tsc = FTL_NV_CACHE_THROTTLE_INTERVAL_MS *
96 0 : (spdk_get_ticks_hz() / 1000);
97 :
98 0 : nvc->chunk_free_target = spdk_divide_round_up(usable_chunks *
99 0 : dev->conf.nv_cache.chunk_free_target,
100 : 100);
101 0 : }
102 :
103 : struct nvc_scrub_ctx {
104 : uint64_t chunk_no;
105 : nvc_scrub_cb cb;
106 : void *cb_ctx;
107 :
108 : struct ftl_layout_region reg_chunk;
109 : struct ftl_md *md_chunk;
110 : };
111 :
112 : static int
113 0 : nvc_scrub_find_next_chunk(struct spdk_ftl_dev *dev, struct nvc_scrub_ctx *scrub_ctx)
114 : {
115 0 : while (scrub_ctx->chunk_no < dev->layout.nvc.chunk_count) {
116 0 : if (dev->nv_cache.nvc_type->ops.is_chunk_active(dev, scrub_ctx->reg_chunk.current.offset)) {
117 0 : return 0;
118 : }
119 :
 120 : /* Chunk inactive - advance the dummy region to the next chunk */
121 0 : scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;
122 0 : scrub_ctx->chunk_no++;
123 : }
124 0 : return -ENOENT;
125 : }
126 :
127 : static void
128 0 : nvc_scrub_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
129 : {
130 0 : struct nvc_scrub_ctx *scrub_ctx = md->owner.cb_ctx;
131 0 : union ftl_md_vss vss;
132 :
133 : /* Move to the next chunk */
134 0 : scrub_ctx->chunk_no++;
135 0 : scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;
136 :
137 0 : FTL_DEBUGLOG(dev, "Scrub progress: %"PRIu64"/%"PRIu64" chunks\n",
138 : scrub_ctx->chunk_no, dev->layout.nvc.chunk_count);
139 :
140 0 : if (status || nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
141 : /* IO error or no more active chunks found. Scrubbing finished. */
142 0 : scrub_ctx->cb(dev, scrub_ctx->cb_ctx, status);
143 0 : ftl_md_destroy(scrub_ctx->md_chunk, 0);
144 0 : free(scrub_ctx);
145 0 : return;
146 : }
147 :
148 : /* Scrub the next chunk */
149 0 : vss.version.md_version = 0;
150 0 : vss.nv_cache.lba = FTL_ADDR_INVALID;
151 :
152 0 : scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
153 0 : scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;
154 :
155 0 : ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
156 : }
157 :
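 : /*
 :  * Scrubs (clears) the data region of every active chunk, one chunk at a time:
 :  * a dummy layout region is positioned over the chunk, ftl_md_clear() zeroes its
 :  * data and stamps the VSS with FTL_ADDR_INVALID, and nvc_scrub_clear_cb() advances
 :  * to the next active chunk until all are done or an IO error occurs.
 :  */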
158 : void
159 0 : ftl_nv_cache_scrub(struct spdk_ftl_dev *dev, nvc_scrub_cb cb, void *cb_ctx)
160 : {
161 0 : struct nvc_scrub_ctx *scrub_ctx = calloc(1, sizeof(*scrub_ctx));
162 0 : union ftl_md_vss vss;
163 :
164 0 : if (!scrub_ctx) {
165 0 : cb(dev, cb_ctx, -ENOMEM);
166 0 : return;
167 : }
168 :
169 0 : scrub_ctx->cb = cb;
170 0 : scrub_ctx->cb_ctx = cb_ctx;
171 :
172 : /* Setup a dummy region for the first chunk */
173 0 : scrub_ctx->reg_chunk.name = ftl_md_region_name(FTL_LAYOUT_REGION_TYPE_DATA_NVC);
174 0 : scrub_ctx->reg_chunk.type = FTL_LAYOUT_REGION_TYPE_DATA_NVC;
175 0 : scrub_ctx->reg_chunk.mirror_type = FTL_LAYOUT_REGION_TYPE_INVALID;
176 0 : scrub_ctx->reg_chunk.current.version = 0;
177 0 : scrub_ctx->reg_chunk.current.offset = 0;
178 0 : scrub_ctx->reg_chunk.current.blocks = dev->layout.nvc.chunk_data_blocks;
179 0 : scrub_ctx->reg_chunk.entry_size = FTL_BLOCK_SIZE;
180 0 : scrub_ctx->reg_chunk.num_entries = dev->layout.nvc.chunk_data_blocks;
181 0 : scrub_ctx->reg_chunk.vss_blksz = dev->nv_cache.md_size;
182 0 : scrub_ctx->reg_chunk.bdev_desc = dev->nv_cache.bdev_desc;
183 0 : scrub_ctx->reg_chunk.ioch = dev->nv_cache.cache_ioch;
184 :
185 : /* Setup an MD object for the region */
186 0 : scrub_ctx->md_chunk = ftl_md_create(dev, scrub_ctx->reg_chunk.current.blocks,
187 : scrub_ctx->reg_chunk.vss_blksz, scrub_ctx->reg_chunk.name, FTL_MD_CREATE_NO_MEM,
188 0 : &scrub_ctx->reg_chunk);
189 :
190 0 : if (!scrub_ctx->md_chunk) {
191 0 : free(scrub_ctx);
192 0 : cb(dev, cb_ctx, -ENOMEM);
193 0 : return;
194 : }
195 :
196 0 : if (nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
197 : /* No active chunks found */
198 0 : ftl_md_destroy(scrub_ctx->md_chunk, 0);
199 0 : free(scrub_ctx);
200 0 : cb(dev, cb_ctx, -ENOENT);
201 0 : return;
202 : }
203 :
204 : /* Scrub the first chunk */
205 0 : vss.version.md_version = 0;
206 0 : vss.nv_cache.lba = FTL_ADDR_INVALID;
207 :
208 0 : scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
209 0 : scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;
210 :
211 0 : ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
212 0 : return;
213 : }
214 :
215 : int
216 0 : ftl_nv_cache_init(struct spdk_ftl_dev *dev)
217 : {
218 0 : struct ftl_nv_cache *nv_cache = &dev->nv_cache;
219 : struct ftl_nv_cache_chunk *chunk;
220 : struct ftl_nv_cache_chunk_md *md;
221 : struct ftl_nv_cache_compactor *compactor;
222 : uint64_t i, offset;
223 :
224 0 : nv_cache->halt = true;
225 :
226 0 : nv_cache->md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
227 0 : if (!nv_cache->md) {
228 0 : FTL_ERRLOG(dev, "No NV cache metadata object\n");
229 0 : return -1;
230 : }
231 :
232 0 : nv_cache->md_pool = ftl_mempool_create(dev->conf.user_io_pool_size,
233 0 : nv_cache->md_size * dev->xfer_size,
234 : FTL_BLOCK_SIZE, SPDK_ENV_SOCKET_ID_ANY);
235 0 : if (!nv_cache->md_pool) {
236 0 : FTL_ERRLOG(dev, "Failed to initialize NV cache metadata pool\n");
237 0 : return -1;
238 : }
239 :
240 : /*
241 : * Initialize chunk info
242 : */
243 0 : nv_cache->chunk_blocks = dev->layout.nvc.chunk_data_blocks;
244 0 : nv_cache->chunk_count = dev->layout.nvc.chunk_count;
245 0 : nv_cache->tail_md_chunk_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(nv_cache);
246 :
247 : /* Allocate chunks */
248 0 : nv_cache->chunks = calloc(nv_cache->chunk_count,
249 : sizeof(nv_cache->chunks[0]));
250 0 : if (!nv_cache->chunks) {
251 0 : FTL_ERRLOG(dev, "Failed to initialize NV cache chunks\n");
252 0 : return -1;
253 : }
254 :
255 0 : TAILQ_INIT(&nv_cache->chunk_free_list);
256 0 : TAILQ_INIT(&nv_cache->chunk_open_list);
257 0 : TAILQ_INIT(&nv_cache->chunk_full_list);
258 0 : TAILQ_INIT(&nv_cache->chunk_comp_list);
259 0 : TAILQ_INIT(&nv_cache->chunk_inactive_list);
260 0 : TAILQ_INIT(&nv_cache->needs_free_persist_list);
261 :
262 : /* First chunk metadata */
263 0 : md = ftl_md_get_buffer(nv_cache->md);
264 0 : if (!md) {
265 0 : FTL_ERRLOG(dev, "No NV cache metadata\n");
266 0 : return -1;
267 : }
268 :
269 0 : chunk = nv_cache->chunks;
270 0 : offset = nvc_data_offset(nv_cache);
271 0 : for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
272 0 : chunk->nv_cache = nv_cache;
273 0 : chunk->md = md;
274 0 : chunk->md->version = FTL_NVC_VERSION_CURRENT;
275 0 : nvc_validate_md(nv_cache, md);
276 0 : chunk->offset = offset;
277 0 : offset += nv_cache->chunk_blocks;
278 :
279 0 : if (nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset)) {
280 0 : nv_cache->chunk_free_count++;
281 0 : TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
282 : } else {
283 0 : chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
284 0 : nv_cache->chunk_inactive_count++;
285 0 : TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
286 : }
287 : }
288 0 : assert(nv_cache->chunk_free_count + nv_cache->chunk_inactive_count == nv_cache->chunk_count);
289 0 : assert(offset <= nvc_data_offset(nv_cache) + nvc_data_blocks(nv_cache));
290 :
291 0 : TAILQ_INIT(&nv_cache->compactor_list);
292 0 : for (i = 0; i < FTL_NV_CACHE_NUM_COMPACTORS; i++) {
293 0 : compactor = compactor_alloc(dev);
294 :
295 0 : if (!compactor) {
296 0 : FTL_ERRLOG(dev, "Cannot allocate compaction process\n");
297 0 : return -1;
298 : }
299 :
300 0 : TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
301 : }
302 :
303 : #define FTL_MAX_OPEN_CHUNKS 2
304 0 : nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
305 : nv_cache_p2l_map_pool_elem_size(nv_cache),
306 : FTL_BLOCK_SIZE,
307 : SPDK_ENV_SOCKET_ID_ANY);
308 0 : if (!nv_cache->p2l_pool) {
309 0 : return -ENOMEM;
310 : }
311 :
312 : /* One entry per open chunk */
313 0 : nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
314 : sizeof(struct ftl_nv_cache_chunk_md),
315 : FTL_BLOCK_SIZE,
316 : SPDK_ENV_SOCKET_ID_ANY);
317 0 : if (!nv_cache->chunk_md_pool) {
318 0 : return -ENOMEM;
319 : }
320 :
 321 : /* Each compactor can be reading a different chunk whose state it needs to switch to free at the end,
 322 : * plus one backup entry each for processing high-invalidity chunks (if there's a backlog of chunks with
 323 : * extremely small, even zero, validity, the compactors can drain them quickly and trigger a burst of
 324 : * updates to the free state at once) */
325 0 : nv_cache->free_chunk_md_pool = ftl_mempool_create(2 * FTL_NV_CACHE_NUM_COMPACTORS,
326 : sizeof(struct ftl_nv_cache_chunk_md),
327 : FTL_BLOCK_SIZE,
328 : SPDK_ENV_SOCKET_ID_ANY);
329 0 : if (!nv_cache->free_chunk_md_pool) {
330 0 : return -ENOMEM;
331 : }
332 :
333 0 : ftl_nv_cache_init_update_limits(dev);
334 0 : ftl_property_register(dev, "cache_device", NULL, 0, NULL, NULL, ftl_property_dump_cache_dev, NULL,
335 : NULL, true);
336 0 : return 0;
337 : }
338 :
339 : void
340 0 : ftl_nv_cache_deinit(struct spdk_ftl_dev *dev)
341 : {
342 0 : struct ftl_nv_cache *nv_cache = &dev->nv_cache;
343 : struct ftl_nv_cache_compactor *compactor;
344 :
345 0 : while (!TAILQ_EMPTY(&nv_cache->compactor_list)) {
346 0 : compactor = TAILQ_FIRST(&nv_cache->compactor_list);
347 0 : TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
348 :
349 0 : compactor_free(dev, compactor);
350 : }
351 :
352 0 : ftl_mempool_destroy(nv_cache->md_pool);
353 0 : ftl_mempool_destroy(nv_cache->p2l_pool);
354 0 : ftl_mempool_destroy(nv_cache->chunk_md_pool);
355 0 : ftl_mempool_destroy(nv_cache->free_chunk_md_pool);
356 0 : nv_cache->md_pool = NULL;
357 0 : nv_cache->p2l_pool = NULL;
358 0 : nv_cache->chunk_md_pool = NULL;
359 0 : nv_cache->free_chunk_md_pool = NULL;
360 :
361 0 : free(nv_cache->chunks);
362 0 : nv_cache->chunks = NULL;
363 0 : }
364 :
365 : static uint64_t
366 0 : chunk_get_free_space(struct ftl_nv_cache *nv_cache,
367 : struct ftl_nv_cache_chunk *chunk)
368 : {
369 0 : assert(chunk->md->write_pointer + nv_cache->tail_md_chunk_blocks <=
370 : nv_cache->chunk_blocks);
371 0 : return nv_cache->chunk_blocks - chunk->md->write_pointer -
372 0 : nv_cache->tail_md_chunk_blocks;
373 : }
374 :
375 : static bool
376 0 : chunk_is_closed(struct ftl_nv_cache_chunk *chunk)
377 : {
378 0 : return chunk->md->write_pointer == chunk->nv_cache->chunk_blocks;
379 : }
380 :
381 : static void ftl_chunk_close(struct ftl_nv_cache_chunk *chunk);
382 :
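 : /*
 :  * Reserves num_blocks of write buffer space in the NV cache and returns its cache
 :  * offset, or FTL_LBA_INVALID if no open chunk is available. If the current chunk
 :  * cannot hold the whole IO, the remaining space is skipped (padded out), the chunk
 :  * is closed once its user area is fully written, and the next open chunk is tried.
 :  */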
383 : static uint64_t
384 0 : ftl_nv_cache_get_wr_buffer(struct ftl_nv_cache *nv_cache, struct ftl_io *io)
385 : {
386 0 : uint64_t address = FTL_LBA_INVALID;
387 0 : uint64_t num_blocks = io->num_blocks;
388 : uint64_t free_space;
389 : struct ftl_nv_cache_chunk *chunk;
390 :
391 : do {
392 0 : chunk = nv_cache->chunk_current;
 393 : /* Chunk has been closed, so pick a new one */
394 0 : if (chunk && chunk_is_closed(chunk)) {
395 0 : chunk = NULL;
396 : }
397 :
398 0 : if (!chunk) {
399 0 : chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
400 0 : if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
401 0 : TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
402 0 : nv_cache->chunk_current = chunk;
403 : } else {
404 : break;
405 : }
406 : }
407 :
408 0 : free_space = chunk_get_free_space(nv_cache, chunk);
409 :
410 0 : if (free_space >= num_blocks) {
411 : /* Enough space in chunk */
412 :
413 : /* Calculate address in NV cache */
414 0 : address = chunk->offset + chunk->md->write_pointer;
415 :
416 : /* Set chunk in IO */
417 0 : io->nv_cache_chunk = chunk;
418 :
419 : /* Move write pointer */
420 0 : chunk->md->write_pointer += num_blocks;
421 0 : break;
422 : }
423 :
 424 : /* Not enough space in the current chunk */
425 0 : nv_cache->chunk_current = NULL;
426 :
427 0 : if (0 == free_space) {
428 0 : continue;
429 : }
430 :
431 0 : chunk->md->blocks_skipped = free_space;
432 0 : chunk->md->blocks_written += free_space;
433 0 : chunk->md->write_pointer += free_space;
434 :
435 0 : if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
436 0 : ftl_chunk_close(chunk);
437 : }
438 : } while (1);
439 :
440 0 : return address;
441 : }
442 :
443 : void
444 0 : ftl_nv_cache_fill_md(struct ftl_io *io)
445 : {
446 0 : struct ftl_nv_cache_chunk *chunk = io->nv_cache_chunk;
447 : uint64_t i;
448 0 : union ftl_md_vss *metadata = io->md;
449 0 : uint64_t lba = ftl_io_get_lba(io, 0);
450 :
451 0 : for (i = 0; i < io->num_blocks; ++i, lba++, metadata++) {
452 0 : metadata->nv_cache.lba = lba;
453 0 : metadata->nv_cache.seq_id = chunk->md->seq_id;
454 : }
455 0 : }
456 :
457 : uint64_t
458 0 : chunk_tail_md_offset(struct ftl_nv_cache *nv_cache)
459 : {
460 0 : return nv_cache->chunk_blocks - nv_cache->tail_md_chunk_blocks;
461 : }
462 :
463 : static void
464 0 : chunk_advance_blocks(struct ftl_nv_cache *nv_cache, struct ftl_nv_cache_chunk *chunk,
465 : uint64_t advanced_blocks)
466 : {
467 0 : chunk->md->blocks_written += advanced_blocks;
468 :
469 0 : assert(chunk->md->blocks_written <= nv_cache->chunk_blocks);
470 :
471 0 : if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
472 0 : ftl_chunk_close(chunk);
473 : }
474 0 : }
475 :
476 : static uint64_t
477 0 : chunk_user_blocks_written(struct ftl_nv_cache_chunk *chunk)
478 : {
479 0 : return chunk->md->blocks_written - chunk->md->blocks_skipped -
480 0 : chunk->nv_cache->tail_md_chunk_blocks;
481 : }
482 :
483 : static bool
484 0 : is_chunk_compacted(struct ftl_nv_cache_chunk *chunk)
485 : {
486 0 : assert(chunk->md->blocks_written != 0);
487 :
488 0 : if (chunk_user_blocks_written(chunk) == chunk->md->blocks_compacted) {
489 0 : return true;
490 : }
491 :
492 0 : return false;
493 : }
494 :
495 : static int
496 0 : ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk)
497 : {
498 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
499 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
500 :
501 0 : p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->chunk_md_pool);
502 :
503 0 : if (!p2l_map->chunk_dma_md) {
504 0 : return -ENOMEM;
505 : }
506 :
507 0 : ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
508 0 : return 0;
509 : }
510 :
511 : static void
512 0 : ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk)
513 : {
514 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
515 :
516 0 : ftl_mempool_put(chunk->nv_cache->chunk_md_pool, p2l_map->chunk_dma_md);
517 0 : p2l_map->chunk_dma_md = NULL;
518 0 : }
519 :
520 : static void
521 0 : ftl_chunk_free(struct ftl_nv_cache_chunk *chunk)
522 : {
523 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
524 :
525 : /* Reset chunk */
526 0 : ftl_nv_cache_chunk_md_initialize(chunk->md);
527 :
528 0 : TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry);
529 0 : nv_cache->chunk_free_persist_count++;
530 0 : }
531 :
532 : static int
533 0 : ftl_chunk_alloc_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
534 : {
535 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
536 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
537 :
538 0 : p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->free_chunk_md_pool);
539 0 : if (!p2l_map->chunk_dma_md) {
540 0 : return -ENOMEM;
541 : }
542 :
543 0 : ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
544 0 : return 0;
545 : }
546 :
547 : static void
548 0 : ftl_chunk_free_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
549 : {
550 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
551 :
552 0 : ftl_mempool_put(chunk->nv_cache->free_chunk_md_pool, p2l_map->chunk_dma_md);
553 0 : p2l_map->chunk_dma_md = NULL;
554 0 : }
555 :
556 : static void
557 0 : chunk_free_cb(int status, void *ctx)
558 : {
559 0 : struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
560 :
561 0 : if (spdk_likely(!status)) {
562 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
563 :
564 0 : nv_cache->chunk_free_persist_count--;
565 0 : TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
566 0 : nv_cache->chunk_free_count++;
567 0 : nv_cache->chunk_full_count--;
568 0 : chunk->md->state = FTL_CHUNK_STATE_FREE;
569 0 : chunk->md->close_seq_id = 0;
570 0 : ftl_chunk_free_chunk_free_entry(chunk);
571 : } else {
572 : #ifdef SPDK_FTL_RETRY_ON_ERROR
573 : ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
574 : #else
575 0 : ftl_abort();
576 : #endif
577 : }
578 0 : }
579 :
580 : static void
581 0 : ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
582 : {
583 : int rc;
584 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
585 : struct ftl_p2l_map *p2l_map;
586 0 : struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
587 0 : struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
588 0 : struct ftl_nv_cache_chunk *tchunk, *chunk = NULL;
589 :
590 0 : TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) {
591 0 : p2l_map = &chunk->p2l_map;
592 0 : rc = ftl_chunk_alloc_chunk_free_entry(chunk);
593 0 : if (rc) {
594 0 : break;
595 : }
596 :
597 0 : TAILQ_REMOVE(&nv_cache->needs_free_persist_list, chunk, entry);
598 :
599 0 : memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
600 0 : p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_FREE;
601 0 : p2l_map->chunk_dma_md->close_seq_id = 0;
602 0 : p2l_map->chunk_dma_md->p2l_map_checksum = 0;
603 :
604 0 : ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
605 : chunk_free_cb, chunk, &chunk->md_persist_entry_ctx);
606 : }
607 0 : }
608 :
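 : /*
 :  * Updates the simple moving average (SMA) of compaction bandwidth over the last
 :  * FTL_NV_CACHE_COMPACTION_SMA_N chunks. Samples (bytes per TSC tick) live in a
 :  * circular buffer; once the buffer is full, the oldest sample is replaced and the
 :  * running sum is adjusted incrementally instead of being recomputed.
 :  */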
609 : static void
610 0 : compaction_stats_update(struct ftl_nv_cache_chunk *chunk)
611 : {
612 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
613 0 : struct compaction_bw_stats *compaction_bw = &nv_cache->compaction_recent_bw;
614 : double *ptr;
615 :
616 0 : if (spdk_unlikely(chunk->compaction_length_tsc == 0)) {
617 0 : return;
618 : }
619 :
620 0 : if (spdk_likely(compaction_bw->count == FTL_NV_CACHE_COMPACTION_SMA_N)) {
621 0 : ptr = compaction_bw->buf + compaction_bw->first;
622 0 : compaction_bw->first++;
623 0 : if (compaction_bw->first == FTL_NV_CACHE_COMPACTION_SMA_N) {
624 0 : compaction_bw->first = 0;
625 : }
626 0 : compaction_bw->sum -= *ptr;
627 : } else {
628 0 : ptr = compaction_bw->buf + compaction_bw->count;
629 0 : compaction_bw->count++;
630 : }
631 :
632 0 : *ptr = (double)chunk->md->blocks_compacted * FTL_BLOCK_SIZE / chunk->compaction_length_tsc;
633 0 : chunk->compaction_length_tsc = 0;
634 :
635 0 : compaction_bw->sum += *ptr;
636 0 : nv_cache->compaction_sma = compaction_bw->sum / compaction_bw->count;
637 : }
638 :
639 : static void
640 0 : chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)
641 : {
642 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
643 0 : uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
644 :
645 0 : chunk->compaction_length_tsc += tsc - chunk->compaction_start_tsc;
646 0 : chunk->compaction_start_tsc = tsc;
647 :
648 0 : chunk->md->blocks_compacted += num_blocks;
649 0 : assert(chunk->md->blocks_compacted <= chunk_user_blocks_written(chunk));
650 0 : if (!is_chunk_compacted(chunk)) {
651 0 : return;
652 : }
653 :
654 : /* Remove chunk from compacted list */
655 0 : TAILQ_REMOVE(&nv_cache->chunk_comp_list, chunk, entry);
656 0 : nv_cache->chunk_comp_count--;
657 :
658 0 : compaction_stats_update(chunk);
659 :
660 0 : ftl_chunk_free(chunk);
661 : }
662 :
663 : static bool
664 0 : is_compaction_required_for_upgrade(struct ftl_nv_cache *nv_cache)
665 : {
666 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
667 :
668 0 : if (dev->conf.prep_upgrade_on_shutdown) {
669 0 : if (nv_cache->chunk_full_count || nv_cache->chunk_open_count) {
670 0 : return true;
671 : }
672 : }
673 :
674 0 : return false;
675 : }
676 :
677 : static bool
678 0 : is_compaction_required(struct ftl_nv_cache *nv_cache)
679 : {
680 0 : if (spdk_unlikely(nv_cache->halt)) {
681 0 : return is_compaction_required_for_upgrade(nv_cache);
682 : }
683 :
684 0 : if (nv_cache->chunk_full_count >= nv_cache->chunk_compaction_threshold) {
685 0 : return true;
686 : }
687 :
688 0 : return false;
689 : }
690 :
691 : static void compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor);
692 : static void compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp);
693 :
694 : static void
695 0 : _compaction_process_pin_lba(void *_comp)
696 : {
697 0 : struct ftl_nv_cache_compactor *comp = _comp;
698 :
699 0 : compaction_process_pin_lba(comp);
700 0 : }
701 :
702 : static void
703 0 : compaction_process_pin_lba_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
704 : {
705 0 : struct ftl_nv_cache_compactor *comp = pin_ctx->cb_ctx;
706 0 : struct ftl_rq *rq = comp->rq;
707 :
708 0 : if (status) {
709 0 : rq->iter.status = status;
710 0 : pin_ctx->lba = FTL_LBA_INVALID;
711 : }
712 :
713 0 : if (--rq->iter.remaining == 0) {
714 0 : if (rq->iter.status) {
715 : /* unpin and try again */
716 0 : ftl_rq_unpin(rq);
717 0 : spdk_thread_send_msg(spdk_get_thread(), _compaction_process_pin_lba, comp);
718 0 : return;
719 : }
720 :
721 0 : compaction_process_finish_read(comp);
722 : }
723 : }
724 :
725 : static void
726 0 : compaction_process_pin_lba(struct ftl_nv_cache_compactor *comp)
727 : {
728 0 : struct ftl_rq *rq = comp->rq;
729 0 : struct spdk_ftl_dev *dev = rq->dev;
730 : struct ftl_rq_entry *entry;
731 :
732 0 : assert(rq->iter.count);
733 0 : rq->iter.remaining = rq->iter.count;
734 0 : rq->iter.status = 0;
735 :
736 0 : FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
737 0 : struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
738 0 : struct ftl_l2p_pin_ctx *pin_ctx = &entry->l2p_pin_ctx;
739 0 : union ftl_md_vss *md = entry->io_md;
740 :
741 0 : if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
742 0 : ftl_l2p_pin_skip(dev, compaction_process_pin_lba_cb, comp, pin_ctx);
743 : } else {
744 0 : ftl_l2p_pin(dev, md->nv_cache.lba, 1, compaction_process_pin_lba_cb, comp, pin_ctx);
745 : }
746 : }
747 0 : }
748 :
749 : static void
750 0 : compaction_process_read_entry_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
751 : {
752 0 : struct ftl_rq_entry *entry = arg;
753 0 : struct ftl_rq *rq = ftl_rq_from_entry(entry);
754 0 : struct spdk_ftl_dev *dev = rq->dev;
755 0 : struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
756 :
757 0 : ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_CMP, bdev_io);
758 :
759 0 : spdk_bdev_free_io(bdev_io);
760 :
761 0 : if (!success) {
762 : /* retry */
763 0 : spdk_thread_send_msg(spdk_get_thread(), compaction_process_read_entry, entry);
764 0 : return;
765 : }
766 :
767 0 : assert(rq->iter.remaining >= entry->bdev_io.num_blocks);
768 0 : rq->iter.remaining -= entry->bdev_io.num_blocks;
769 0 : if (0 == rq->iter.remaining) {
770 : /* All IOs processed, go to next phase - pining */
771 0 : compaction_process_pin_lba(compactor);
772 : }
773 : }
774 :
775 : static void
776 0 : compaction_process_read_entry(void *arg)
777 : {
778 0 : struct ftl_rq_entry *entry = arg;
779 0 : struct ftl_rq *rq = ftl_rq_from_entry(entry);
780 0 : struct spdk_ftl_dev *dev = rq->dev;
781 :
782 0 : int rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, dev->nv_cache.bdev_desc,
783 : dev->nv_cache.cache_ioch, entry->io_payload, entry->io_md,
784 : entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
785 : compaction_process_read_entry_cb, entry);
786 :
787 0 : if (spdk_unlikely(rc)) {
788 0 : if (rc == -ENOMEM) {
789 0 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->nv_cache.bdev_desc);
790 0 : entry->bdev_io.wait_entry.bdev = bdev;
791 0 : entry->bdev_io.wait_entry.cb_fn = compaction_process_read_entry;
792 0 : entry->bdev_io.wait_entry.cb_arg = entry;
793 0 : spdk_bdev_queue_io_wait(bdev, dev->nv_cache.cache_ioch, &entry->bdev_io.wait_entry);
794 : } else {
795 0 : ftl_abort();
796 : }
797 : }
798 :
799 0 : dev->stats.io_activity_total += entry->bdev_io.num_blocks;
800 0 : }
801 :
802 : static bool
803 0 : is_chunk_to_read(struct ftl_nv_cache_chunk *chunk)
804 : {
805 0 : assert(chunk->md->blocks_written != 0);
806 :
807 0 : if (chunk_user_blocks_written(chunk) == chunk->md->read_pointer) {
808 0 : return false;
809 : }
810 :
811 0 : return true;
812 : }
813 :
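 : /*
 :  * Picks a chunk to compact: prefer the chunk already at the head of the compaction
 :  * list if it still has blocks to read; otherwise promote the oldest chunk from the
 :  * full list onto the compaction list. Returns NULL when there is nothing to compact.
 :  */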
814 : static struct ftl_nv_cache_chunk *
815 0 : get_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
816 : {
817 0 : struct ftl_nv_cache_chunk *chunk = NULL;
818 :
819 0 : if (!TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
820 0 : chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
821 0 : if (is_chunk_to_read(chunk)) {
822 0 : return chunk;
823 : }
824 : }
825 :
826 0 : if (!TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
827 0 : chunk = TAILQ_FIRST(&nv_cache->chunk_full_list);
828 0 : TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry);
829 :
830 0 : assert(chunk->md->write_pointer);
831 : } else {
832 0 : return NULL;
833 : }
834 :
835 0 : if (spdk_likely(chunk)) {
836 0 : assert(chunk->md->write_pointer != 0);
837 0 : TAILQ_INSERT_HEAD(&nv_cache->chunk_comp_list, chunk, entry);
838 0 : nv_cache->chunk_comp_count++;
839 : }
840 :
841 0 : return chunk;
842 : }
843 :
844 : static uint64_t
845 0 : chunk_blocks_to_read(struct ftl_nv_cache_chunk *chunk)
846 : {
847 : uint64_t blocks_written;
848 : uint64_t blocks_to_read;
849 :
850 0 : assert(chunk->md->blocks_written >= chunk->md->blocks_skipped);
851 0 : blocks_written = chunk_user_blocks_written(chunk);
852 :
853 0 : assert(blocks_written >= chunk->md->read_pointer);
854 0 : blocks_to_read = blocks_written - chunk->md->read_pointer;
855 :
856 0 : return blocks_to_read;
857 : }
858 :
859 : static void
860 0 : compactor_deactivate(struct ftl_nv_cache_compactor *compactor)
861 : {
862 0 : struct ftl_nv_cache *nv_cache = compactor->nv_cache;
863 :
864 0 : compactor->rq->iter.count = 0;
865 0 : assert(nv_cache->compaction_active_count);
866 0 : nv_cache->compaction_active_count--;
867 0 : TAILQ_INSERT_TAIL(&nv_cache->compactor_list, compactor, entry);
868 0 : }
869 :
870 : static void
871 0 : compaction_process_invalidate_entry(struct ftl_rq_entry *entry)
872 : {
873 0 : entry->addr = FTL_ADDR_INVALID;
874 0 : entry->lba = FTL_LBA_INVALID;
875 0 : entry->seq_id = 0;
876 0 : entry->owner.priv = NULL;
877 0 : }
878 :
879 : static void
880 0 : compaction_process_pad(struct ftl_nv_cache_compactor *compactor, uint64_t idx)
881 : {
882 0 : struct ftl_rq *rq = compactor->rq;
883 : struct ftl_rq_entry *entry;
884 :
885 0 : assert(idx < rq->num_blocks);
886 0 : FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[idx], entry, rq->num_blocks) {
887 0 : compaction_process_invalidate_entry(entry);
888 : }
889 0 : }
890 :
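 : /*
 :  * Issues the read phase of compaction. Entries with physically contiguous addresses
 :  * are coalesced into a single bdev read: e.g. entries at cache offsets 100, 101, 102
 :  * and 200 would produce two reads, one of 3 blocks at 100 and one of 1 block at 200.
 :  */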
891 : static void
892 0 : compaction_process_read(struct ftl_nv_cache_compactor *compactor)
893 : {
894 0 : struct ftl_rq *rq = compactor->rq;
895 0 : struct ftl_nv_cache *nv_cache = compactor->nv_cache;
896 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
897 : struct ftl_rq_entry *entry, *io;
898 :
899 0 : assert(rq->iter.count);
900 0 : rq->iter.remaining = rq->iter.count;
901 :
902 0 : io = rq->entries;
903 0 : io->bdev_io.num_blocks = 1;
904 0 : io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
905 0 : FTL_RQ_ENTRY_LOOP_FROM(rq, &rq->entries[1], entry, rq->iter.count) {
906 0 : if (entry->addr == io->addr + io->bdev_io.num_blocks) {
907 0 : io->bdev_io.num_blocks++;
908 : } else {
909 0 : compaction_process_read_entry(io);
910 0 : io = entry;
911 0 : io->bdev_io.num_blocks = 1;
912 0 : io->bdev_io.offset_blocks = ftl_addr_to_nvc_offset(dev, io->addr);
913 : }
914 : }
915 0 : compaction_process_read_entry(io);
916 0 : }
917 :
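 : /*
 :  * Finds the next valid block to read in the chunk using the device valid map.
 :  * Blocks that were already invalidated (overwritten) are skipped and immediately
 :  * counted as compacted, so a fully invalid tail finishes the chunk without any IO.
 :  */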
918 : static ftl_addr
919 0 : compaction_chunk_read_pos(struct spdk_ftl_dev *dev, struct ftl_nv_cache_chunk *chunk)
920 : {
921 : ftl_addr start, pos;
922 0 : uint64_t skip, to_read = chunk_blocks_to_read(chunk);
923 :
924 0 : if (0 == to_read) {
925 0 : return FTL_ADDR_INVALID;
926 : }
927 :
928 0 : start = ftl_addr_from_nvc_offset(dev, chunk->offset + chunk->md->read_pointer);
929 0 : pos = ftl_bitmap_find_first_set(dev->valid_map, start, start + to_read - 1);
930 :
931 0 : if (pos == UINT64_MAX) {
932 0 : chunk->md->read_pointer += to_read;
933 0 : chunk_compaction_advance(chunk, to_read);
934 0 : return FTL_ADDR_INVALID;
935 : }
936 :
937 0 : assert(pos >= start);
938 0 : skip = pos - start;
939 0 : if (skip) {
940 0 : chunk->md->read_pointer += skip;
941 0 : chunk_compaction_advance(chunk, skip);
942 : }
943 :
944 0 : return pos;
945 : }
946 :
947 : static bool
948 0 : compaction_entry_read_pos(struct ftl_nv_cache *nv_cache, struct ftl_rq_entry *entry)
949 : {
950 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
951 0 : struct ftl_nv_cache_chunk *chunk = NULL;
952 0 : ftl_addr addr = FTL_ADDR_INVALID;
953 :
954 0 : while (!chunk) {
955 : /* Get currently handled chunk */
956 0 : chunk = get_chunk_for_compaction(nv_cache);
957 0 : if (!chunk) {
958 0 : return false;
959 : }
960 0 : chunk->compaction_start_tsc = spdk_thread_get_last_tsc(spdk_get_thread());
961 :
962 : /* Get next read position in chunk */
963 0 : addr = compaction_chunk_read_pos(dev, chunk);
964 0 : if (FTL_ADDR_INVALID == addr) {
965 0 : chunk = NULL;
966 : }
967 : }
968 :
969 0 : assert(FTL_ADDR_INVALID != addr);
970 :
971 : /* Set entry address info and chunk */
972 0 : entry->addr = addr;
973 0 : entry->owner.priv = chunk;
974 :
975 : /* Move read pointer in the chunk */
976 0 : chunk->md->read_pointer++;
977 :
978 0 : return true;
979 : }
980 :
981 : static void
982 0 : compaction_process_start(struct ftl_nv_cache_compactor *compactor)
983 : {
984 0 : struct ftl_rq *rq = compactor->rq;
985 0 : struct ftl_nv_cache *nv_cache = compactor->nv_cache;
986 : struct ftl_rq_entry *entry;
987 :
988 0 : assert(0 == compactor->rq->iter.count);
989 0 : FTL_RQ_ENTRY_LOOP(rq, entry, rq->num_blocks) {
990 0 : if (!compaction_entry_read_pos(nv_cache, entry)) {
991 0 : compaction_process_pad(compactor, entry->index);
992 0 : break;
993 : }
994 0 : rq->iter.count++;
995 : }
996 :
997 0 : if (rq->iter.count) {
998 : /* Schedule Read IOs */
999 0 : compaction_process_read(compactor);
1000 : } else {
1001 0 : compactor_deactivate(compactor);
1002 : }
1003 0 : }
1004 :
1005 : static void
1006 0 : compaction_process(struct ftl_nv_cache *nv_cache)
1007 : {
1008 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1009 : struct ftl_nv_cache_compactor *compactor;
1010 :
1011 0 : if (!is_compaction_required(nv_cache)) {
1012 0 : return;
1013 : }
1014 :
1015 0 : compactor = TAILQ_FIRST(&nv_cache->compactor_list);
1016 0 : if (!compactor) {
1017 0 : return;
1018 : }
1019 :
1020 0 : TAILQ_REMOVE(&nv_cache->compactor_list, compactor, entry);
1021 0 : compactor->nv_cache->compaction_active_count++;
1022 0 : compaction_process_start(compactor);
1023 0 : ftl_add_io_activity(dev);
1024 : }
1025 :
1026 : static void
1027 0 : compaction_process_ftl_done(struct ftl_rq *rq)
1028 : {
1029 0 : struct spdk_ftl_dev *dev = rq->dev;
1030 0 : struct ftl_nv_cache_compactor *compactor = rq->owner.priv;
1031 0 : struct ftl_band *band = rq->io.band;
1032 : struct ftl_rq_entry *entry;
1033 : ftl_addr addr;
1034 :
1035 0 : if (spdk_unlikely(false == rq->success)) {
 1036 : /* IO error, retry writing */
1037 : #ifdef SPDK_FTL_RETRY_ON_ERROR
1038 : ftl_writer_queue_rq(&dev->writer_user, rq);
1039 : return;
1040 : #else
1041 0 : ftl_abort();
1042 : #endif
1043 : }
1044 :
1045 0 : assert(rq->iter.count);
1046 :
1047 : /* Update L2P table */
1048 0 : addr = rq->io.addr;
1049 0 : FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
1050 0 : struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
1051 :
1052 0 : if (entry->lba != FTL_LBA_INVALID) {
1053 0 : ftl_l2p_update_base(dev, entry->lba, addr, entry->addr);
1054 0 : ftl_l2p_unpin(dev, entry->lba, 1);
1055 0 : chunk_compaction_advance(chunk, 1);
1056 : } else {
1057 0 : assert(entry->addr == FTL_ADDR_INVALID);
1058 : }
1059 :
1060 0 : addr = ftl_band_next_addr(band, addr, 1);
1061 0 : compaction_process_invalidate_entry(entry);
1062 : }
1063 :
1064 0 : compactor_deactivate(compactor);
1065 0 : }
1066 :
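 : /*
 :  * Runs after all reads and L2P pins complete: each entry is kept only if the L2P
 :  * still maps its LBA to the cached address. Entries invalidated in the meantime
 :  * (or holding stale/invalid metadata) are dropped and their blocks accounted as
 :  * compacted; anything left is handed to the user writer to be written to a band.
 :  */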
1067 : static void
1068 0 : compaction_process_finish_read(struct ftl_nv_cache_compactor *compactor)
1069 : {
1070 0 : struct ftl_rq *rq = compactor->rq;
1071 0 : struct spdk_ftl_dev *dev = rq->dev;
1072 : struct ftl_rq_entry *entry;
1073 : ftl_addr current_addr;
1074 0 : uint64_t skip = 0;
1075 :
1076 0 : FTL_RQ_ENTRY_LOOP(rq, entry, rq->iter.count) {
1077 0 : struct ftl_nv_cache_chunk *chunk = entry->owner.priv;
1078 0 : union ftl_md_vss *md = entry->io_md;
1079 :
1080 0 : if (md->nv_cache.lba == FTL_LBA_INVALID || md->nv_cache.seq_id != chunk->md->seq_id) {
1081 0 : skip++;
1082 0 : compaction_process_invalidate_entry(entry);
1083 0 : chunk_compaction_advance(chunk, 1);
1084 0 : continue;
1085 : }
1086 :
1087 0 : current_addr = ftl_l2p_get(dev, md->nv_cache.lba);
1088 0 : if (current_addr == entry->addr) {
1089 0 : entry->lba = md->nv_cache.lba;
1090 0 : entry->seq_id = chunk->md->seq_id;
1091 : } else {
 1092 : /* This address was already invalidated, just skip this block */
1093 0 : chunk_compaction_advance(chunk, 1);
1094 0 : ftl_l2p_unpin(dev, md->nv_cache.lba, 1);
1095 0 : compaction_process_invalidate_entry(entry);
1096 0 : skip++;
1097 : }
1098 : }
1099 :
1100 0 : if (skip < rq->iter.count) {
1101 : /*
1102 : * Request contains data to be placed on FTL, compact it
1103 : */
1104 0 : ftl_writer_queue_rq(&dev->writer_user, rq);
1105 : } else {
1106 0 : compactor_deactivate(compactor);
1107 : }
1108 0 : }
1109 :
1110 : static void
1111 0 : compactor_free(struct spdk_ftl_dev *dev, struct ftl_nv_cache_compactor *compactor)
1112 : {
1113 0 : if (!compactor) {
1114 0 : return;
1115 : }
1116 :
1117 0 : ftl_rq_del(compactor->rq);
1118 0 : free(compactor);
1119 : }
1120 :
1121 : static struct ftl_nv_cache_compactor *
1122 0 : compactor_alloc(struct spdk_ftl_dev *dev)
1123 : {
1124 : struct ftl_nv_cache_compactor *compactor;
1125 : struct ftl_rq_entry *entry;
1126 :
1127 0 : compactor = calloc(1, sizeof(*compactor));
1128 0 : if (!compactor) {
1129 0 : goto error;
1130 : }
1131 :
 1132 : /* Allocate a helper request for reading */
1133 0 : compactor->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
1134 0 : if (!compactor->rq) {
1135 0 : goto error;
1136 : }
1137 :
1138 0 : compactor->nv_cache = &dev->nv_cache;
1139 0 : compactor->rq->owner.priv = compactor;
1140 0 : compactor->rq->owner.cb = compaction_process_ftl_done;
1141 0 : compactor->rq->owner.compaction = true;
1142 :
1143 0 : FTL_RQ_ENTRY_LOOP(compactor->rq, entry, compactor->rq->num_blocks) {
1144 0 : compaction_process_invalidate_entry(entry);
1145 : }
1146 :
1147 0 : return compactor;
1148 :
1149 0 : error:
1150 0 : compactor_free(dev, compactor);
1151 0 : return NULL;
1152 : }
1153 :
1154 : static void
1155 0 : ftl_nv_cache_submit_cb_done(struct ftl_io *io)
1156 : {
1157 0 : struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1158 :
1159 0 : chunk_advance_blocks(nv_cache, io->nv_cache_chunk, io->num_blocks);
1160 0 : io->nv_cache_chunk = NULL;
1161 :
1162 0 : ftl_mempool_put(nv_cache->md_pool, io->md);
1163 0 : ftl_io_complete(io);
1164 0 : }
1165 :
1166 : static void
1167 0 : ftl_nv_cache_l2p_update(struct ftl_io *io)
1168 : {
1169 0 : struct spdk_ftl_dev *dev = io->dev;
1170 0 : ftl_addr next_addr = io->addr;
1171 : size_t i;
1172 :
1173 0 : for (i = 0; i < io->num_blocks; ++i, ++next_addr) {
1174 0 : ftl_l2p_update_cache(dev, ftl_io_get_lba(io, i), next_addr, io->map[i]);
1175 : }
1176 :
1177 0 : ftl_l2p_unpin(dev, io->lba, io->num_blocks);
1178 0 : ftl_nv_cache_submit_cb_done(io);
1179 0 : }
1180 :
1181 : static void
1182 0 : ftl_nv_cache_submit_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1183 : {
1184 0 : struct ftl_io *io = cb_arg;
1185 :
1186 0 : ftl_stats_bdev_io_completed(io->dev, FTL_STATS_TYPE_USER, bdev_io);
1187 :
1188 0 : spdk_bdev_free_io(bdev_io);
1189 :
1190 0 : if (spdk_unlikely(!success)) {
1191 0 : FTL_ERRLOG(io->dev, "Non-volatile cache write failed at %"PRIx64"\n",
1192 : io->addr);
1193 0 : io->status = -EIO;
1194 0 : ftl_l2p_unpin(io->dev, io->lba, io->num_blocks);
1195 0 : ftl_nv_cache_submit_cb_done(io);
1196 : } else {
1197 0 : ftl_nv_cache_l2p_update(io);
1198 : }
1199 0 : }
1200 :
1201 : static void
1202 0 : nv_cache_write(void *_io)
1203 : {
1204 0 : struct ftl_io *io = _io;
1205 0 : struct spdk_ftl_dev *dev = io->dev;
1206 0 : struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1207 : int rc;
1208 :
1209 0 : rc = spdk_bdev_writev_blocks_with_md(nv_cache->bdev_desc, nv_cache->cache_ioch,
1210 0 : io->iov, io->iov_cnt, io->md,
1211 : ftl_addr_to_nvc_offset(dev, io->addr), io->num_blocks,
1212 : ftl_nv_cache_submit_cb, io);
1213 0 : if (spdk_unlikely(rc)) {
1214 0 : if (rc == -ENOMEM) {
1215 0 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
1216 0 : io->bdev_io_wait.bdev = bdev;
1217 0 : io->bdev_io_wait.cb_fn = nv_cache_write;
1218 0 : io->bdev_io_wait.cb_arg = io;
1219 0 : spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &io->bdev_io_wait);
1220 : } else {
1221 0 : ftl_abort();
1222 : }
1223 : }
1224 0 : }
1225 :
1226 : static void
1227 0 : ftl_nv_cache_pin_cb(struct spdk_ftl_dev *dev, int status, struct ftl_l2p_pin_ctx *pin_ctx)
1228 : {
1229 0 : struct ftl_io *io = pin_ctx->cb_ctx;
1230 : size_t i;
1231 :
1232 0 : if (spdk_unlikely(status != 0)) {
1233 : /* Retry on the internal L2P fault */
1234 0 : FTL_ERRLOG(dev, "Cannot PIN LBA for NV cache write failed at %"PRIx64"\n",
1235 : io->addr);
1236 0 : io->status = -EAGAIN;
1237 0 : ftl_nv_cache_submit_cb_done(io);
1238 0 : return;
1239 : }
1240 :
 1241 : /* Remember the previous L2P mapping to resolve conflicts in case of an outstanding write-after-write */
1242 0 : for (i = 0; i < io->num_blocks; ++i) {
1243 0 : io->map[i] = ftl_l2p_get(dev, ftl_io_get_lba(io, i));
1244 : }
1245 :
1246 0 : assert(io->iov_pos == 0);
1247 :
1248 0 : ftl_trace_submission(io->dev, io, io->addr, io->num_blocks);
1249 :
1250 0 : nv_cache_write(io);
1251 : }
1252 :
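 : /*
 :  * NV cache write path: grab a VSS metadata buffer, reserve space in an open chunk,
 :  * fill the per-block metadata (LBA + chunk seq_id), then pin the L2P range; the
 :  * actual bdev write and the L2P update happen in the pin callback. Returns false
 :  * when the md pool or cache space is exhausted so the caller can resubmit later.
 :  */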
1253 : bool
1254 0 : ftl_nv_cache_write(struct ftl_io *io)
1255 : {
1256 0 : struct spdk_ftl_dev *dev = io->dev;
1257 : uint64_t cache_offset;
1258 :
1259 0 : io->md = ftl_mempool_get(dev->nv_cache.md_pool);
1260 0 : if (spdk_unlikely(!io->md)) {
1261 0 : return false;
1262 : }
1263 :
1264 : /* Reserve area on the write buffer cache */
1265 0 : cache_offset = ftl_nv_cache_get_wr_buffer(&dev->nv_cache, io);
1266 0 : if (cache_offset == FTL_LBA_INVALID) {
1267 : /* No free space in NV cache, resubmit request */
1268 0 : ftl_mempool_put(dev->nv_cache.md_pool, io->md);
1269 0 : return false;
1270 : }
1271 0 : io->addr = ftl_addr_from_nvc_offset(dev, cache_offset);
1272 0 : io->nv_cache_chunk = dev->nv_cache.chunk_current;
1273 :
1274 0 : ftl_nv_cache_fill_md(io);
1275 0 : ftl_l2p_pin(io->dev, io->lba, io->num_blocks,
1276 : ftl_nv_cache_pin_cb, io,
1277 : &io->l2p_pin_ctx);
1278 :
1279 0 : dev->nv_cache.throttle.blocks_submitted += io->num_blocks;
1280 :
1281 0 : return true;
1282 : }
1283 :
1284 : int
1285 0 : ftl_nv_cache_read(struct ftl_io *io, ftl_addr addr, uint32_t num_blocks,
1286 : spdk_bdev_io_completion_cb cb, void *cb_arg)
1287 : {
1288 : int rc;
1289 0 : struct ftl_nv_cache *nv_cache = &io->dev->nv_cache;
1290 :
1291 0 : assert(ftl_addr_in_nvc(io->dev, addr));
1292 :
1293 0 : rc = ftl_nv_cache_bdev_read_blocks_with_md(io->dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1294 0 : ftl_io_iovec_addr(io), NULL, ftl_addr_to_nvc_offset(io->dev, addr),
1295 : num_blocks, cb, cb_arg);
1296 :
1297 0 : return rc;
1298 : }
1299 :
1300 : bool
1301 0 : ftl_nv_cache_is_halted(struct ftl_nv_cache *nv_cache)
1302 : {
1303 0 : if (nv_cache->compaction_active_count) {
1304 0 : return false;
1305 : }
1306 :
1307 0 : if (nv_cache->chunk_open_count > 0) {
1308 0 : return false;
1309 : }
1310 :
1311 0 : if (is_compaction_required_for_upgrade(nv_cache)) {
1312 0 : return false;
1313 : }
1314 :
1315 0 : return true;
1316 : }
1317 :
1318 : void
1319 0 : ftl_chunk_map_set_lba(struct ftl_nv_cache_chunk *chunk,
1320 : uint64_t offset, uint64_t lba)
1321 : {
1322 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1323 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1324 :
1325 0 : ftl_lba_store(dev, p2l_map->chunk_map, offset, lba);
1326 0 : }
1327 :
1328 : uint64_t
1329 0 : ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset)
1330 : {
1331 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1332 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1333 :
1334 0 : return ftl_lba_load(dev, p2l_map->chunk_map, offset);
1335 : }
1336 :
1337 : static void
1338 0 : ftl_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
1339 : {
1340 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1341 0 : uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1342 : uint64_t offset;
1343 :
1344 0 : offset = (cache_offset - chunk->offset) % chunk->nv_cache->chunk_blocks;
1345 0 : ftl_chunk_map_set_lba(chunk, offset, lba);
1346 0 : }
1347 :
1348 : struct ftl_nv_cache_chunk *
1349 0 : ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev *dev, ftl_addr addr)
1350 : {
1351 0 : struct ftl_nv_cache_chunk *chunk = dev->nv_cache.chunks;
1352 : uint64_t chunk_idx;
1353 0 : uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
1354 :
1355 0 : assert(chunk != NULL);
1356 0 : chunk_idx = (cache_offset - chunk->offset) / chunk->nv_cache->chunk_blocks;
1357 0 : chunk += chunk_idx;
1358 :
1359 0 : return chunk;
1360 : }
1361 :
1362 : void
1363 0 : ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)
1364 : {
1365 : struct ftl_nv_cache_chunk *chunk;
1366 :
1367 0 : chunk = ftl_nv_cache_get_chunk_from_addr(dev, addr);
1368 :
1369 0 : assert(lba != FTL_LBA_INVALID);
1370 :
1371 0 : ftl_chunk_set_addr(chunk, lba, addr);
1372 0 : ftl_bitmap_set(dev->valid_map, addr);
1373 0 : }
1374 :
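 : /*
 :  * Proportional controller for write throttling. The error term is the fraction of
 :  * chunks by which the free count misses the target; the per-interval limit on
 :  * submitted blocks follows the measured compaction bandwidth, scaled by
 :  * (1 + Kp * err) with the modifier clamped to [MIN, MAX]. For illustration only:
 :  * with compaction_sma in bytes/tick, limit = sma * interval_tsc / FTL_BLOCK_SIZE
 :  * * (1 + modifier), so a surplus of free chunks raises the limit and a deficit
 :  * lowers it.
 :  */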
1375 : static void
1376 0 : ftl_nv_cache_throttle_update(struct ftl_nv_cache *nv_cache)
1377 : {
1378 : double err;
1379 : double modifier;
1380 :
1381 0 : err = ((double)nv_cache->chunk_free_count - nv_cache->chunk_free_target) / nv_cache->chunk_count;
1382 0 : modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_KP * err;
1383 :
1384 0 : if (modifier < FTL_NV_CACHE_THROTTLE_MODIFIER_MIN) {
1385 0 : modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MIN;
1386 0 : } else if (modifier > FTL_NV_CACHE_THROTTLE_MODIFIER_MAX) {
1387 0 : modifier = FTL_NV_CACHE_THROTTLE_MODIFIER_MAX;
1388 : }
1389 :
1390 0 : if (spdk_unlikely(nv_cache->compaction_sma == 0 || nv_cache->compaction_active_count == 0)) {
1391 0 : nv_cache->throttle.blocks_submitted_limit = UINT64_MAX;
1392 : } else {
1393 0 : double blocks_per_interval = nv_cache->compaction_sma * nv_cache->throttle.interval_tsc /
1394 : FTL_BLOCK_SIZE;
1395 0 : nv_cache->throttle.blocks_submitted_limit = blocks_per_interval * (1.0 + modifier);
1396 : }
1397 0 : }
1398 :
1399 : static void
1400 0 : ftl_nv_cache_process_throttle(struct ftl_nv_cache *nv_cache)
1401 : {
1402 0 : uint64_t tsc = spdk_thread_get_last_tsc(spdk_get_thread());
1403 :
1404 0 : if (spdk_unlikely(!nv_cache->throttle.start_tsc)) {
1405 0 : nv_cache->throttle.start_tsc = tsc;
1406 0 : } else if (tsc - nv_cache->throttle.start_tsc >= nv_cache->throttle.interval_tsc) {
1407 0 : ftl_nv_cache_throttle_update(nv_cache);
1408 0 : nv_cache->throttle.start_tsc = tsc;
1409 0 : nv_cache->throttle.blocks_submitted = 0;
1410 : }
1411 0 : }
1412 :
1413 : static void ftl_chunk_open(struct ftl_nv_cache_chunk *chunk);
1414 :
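 : /*
 :  * Periodic NV cache housekeeping, run from the device poller: open a new chunk if
 :  * fewer than FTL_MAX_OPEN_CHUNKS are open, kick off compaction when needed, persist
 :  * the metadata of chunks that became free, and refresh the write throttle window.
 :  */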
1415 : void
1416 0 : ftl_nv_cache_process(struct spdk_ftl_dev *dev)
1417 : {
1418 0 : struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1419 :
1420 0 : assert(dev->nv_cache.bdev_desc);
1421 :
1422 0 : if (nv_cache->chunk_open_count < FTL_MAX_OPEN_CHUNKS && spdk_likely(!nv_cache->halt) &&
1423 0 : !TAILQ_EMPTY(&nv_cache->chunk_free_list)) {
1424 0 : struct ftl_nv_cache_chunk *chunk = TAILQ_FIRST(&nv_cache->chunk_free_list);
1425 0 : TAILQ_REMOVE(&nv_cache->chunk_free_list, chunk, entry);
1426 0 : TAILQ_INSERT_TAIL(&nv_cache->chunk_open_list, chunk, entry);
1427 0 : nv_cache->chunk_free_count--;
1428 0 : chunk->md->seq_id = ftl_get_next_seq_id(dev);
1429 0 : ftl_chunk_open(chunk);
1430 0 : ftl_add_io_activity(dev);
1431 : }
1432 :
1433 0 : compaction_process(nv_cache);
1434 0 : ftl_chunk_persist_free_state(nv_cache);
1435 0 : ftl_nv_cache_process_throttle(nv_cache);
1436 0 : }
1437 :
1438 : static bool
1439 0 : ftl_nv_cache_full(struct ftl_nv_cache *nv_cache)
1440 : {
1441 0 : if (0 == nv_cache->chunk_open_count && NULL == nv_cache->chunk_current) {
1442 0 : return true;
1443 : } else {
1444 0 : return false;
1445 : }
1446 : }
1447 :
1448 : bool
1449 0 : ftl_nv_cache_throttle(struct spdk_ftl_dev *dev)
1450 : {
1451 0 : struct ftl_nv_cache *nv_cache = &dev->nv_cache;
1452 :
1453 0 : if (dev->nv_cache.throttle.blocks_submitted >= nv_cache->throttle.blocks_submitted_limit ||
1454 0 : ftl_nv_cache_full(nv_cache)) {
1455 0 : return true;
1456 : }
1457 :
1458 0 : return false;
1459 : }
1460 :
1461 : static void
1462 0 : chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk)
1463 : {
1464 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1465 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1466 :
1467 0 : ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1468 0 : p2l_map->chunk_map = NULL;
1469 :
1470 0 : ftl_chunk_free_md_entry(chunk);
1471 0 : }
1472 :
1473 : int
1474 0 : ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache)
1475 : {
1476 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1477 : struct ftl_nv_cache_chunk *chunk;
1478 0 : int status = 0;
1479 : uint64_t i;
1480 :
1481 0 : assert(nv_cache->chunk_open_count == 0);
1482 :
1483 0 : if (nv_cache->compaction_active_count) {
1484 0 : FTL_ERRLOG(dev, "Cannot save NV cache state, compaction in progress\n");
1485 0 : return -EINVAL;
1486 : }
1487 :
1488 0 : chunk = nv_cache->chunks;
1489 0 : if (!chunk) {
1490 0 : FTL_ERRLOG(dev, "Cannot save NV cache state, no NV cache metadata\n");
1491 0 : return -ENOMEM;
1492 : }
1493 :
1494 0 : for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1495 0 : nvc_validate_md(nv_cache, chunk->md);
1496 :
1497 0 : if (chunk->md->read_pointer) {
1498 : /* Only full chunks can be compacted */
1499 0 : if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1500 0 : assert(0);
1501 : status = -EINVAL;
1502 : break;
1503 : }
1504 :
1505 : /*
1506 : * Chunk in the middle of compaction, start over after
1507 : * load
1508 : */
1509 0 : chunk->md->read_pointer = chunk->md->blocks_compacted = 0;
1510 0 : } else if (chunk->md->blocks_written == nv_cache->chunk_blocks) {
1511 : /* Full chunk */
1512 0 : } else if (0 == chunk->md->blocks_written) {
1513 : /* Empty chunk */
1514 : } else {
1515 0 : assert(0);
1516 : status = -EINVAL;
1517 : break;
1518 : }
1519 : }
1520 :
1521 0 : if (status) {
 1522 0 : FTL_ERRLOG(dev, "Cannot save NV cache state, inconsistent NV cache "
 1523 : "metadata\n");
1524 : }
1525 :
1526 0 : return status;
1527 : }
1528 :
1529 : static int
1530 0 : sort_chunks_cmp(const void *a, const void *b)
1531 : {
1532 0 : struct ftl_nv_cache_chunk *a_chunk = *(struct ftl_nv_cache_chunk **)a;
1533 0 : struct ftl_nv_cache_chunk *b_chunk = *(struct ftl_nv_cache_chunk **)b;
 1534 : /* seq_id is unsigned 64-bit - compare instead of subtracting to avoid truncation to int */
 1535 0 : return (a_chunk->md->seq_id > b_chunk->md->seq_id) - (a_chunk->md->seq_id < b_chunk->md->seq_id);
1536 : }
1537 :
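 : /*
 :  * After load, rebuilds the full-chunk list in ascending seq_id order so that
 :  * compaction resumes with the oldest data first.
 :  */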
1538 : static int
1539 0 : sort_chunks(struct ftl_nv_cache *nv_cache)
1540 : {
1541 : struct ftl_nv_cache_chunk **chunks_list;
1542 : struct ftl_nv_cache_chunk *chunk;
1543 : uint32_t i;
1544 :
1545 0 : if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
1546 0 : return 0;
1547 : }
1548 :
1549 0 : chunks_list = calloc(nv_cache->chunk_full_count,
1550 : sizeof(chunks_list[0]));
1551 0 : if (!chunks_list) {
1552 0 : return -ENOMEM;
1553 : }
1554 :
1555 0 : i = 0;
1556 0 : TAILQ_FOREACH(chunk, &nv_cache->chunk_full_list, entry) {
1557 0 : chunks_list[i] = chunk;
1558 0 : i++;
1559 : }
1560 0 : assert(i == nv_cache->chunk_full_count);
1561 :
1562 0 : qsort(chunks_list, nv_cache->chunk_full_count, sizeof(chunks_list[0]),
1563 : sort_chunks_cmp);
1564 :
1565 0 : TAILQ_INIT(&nv_cache->chunk_full_list);
1566 0 : for (i = 0; i < nv_cache->chunk_full_count; i++) {
1567 0 : chunk = chunks_list[i];
1568 0 : TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1569 : }
1570 :
1571 0 : free(chunks_list);
1572 0 : return 0;
1573 : }
1574 :
1575 : static int
1576 0 : chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk)
1577 : {
1578 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1579 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1580 :
1581 0 : assert(p2l_map->ref_cnt == 0);
1582 0 : assert(p2l_map->chunk_map == NULL);
1583 :
1584 0 : p2l_map->chunk_map = ftl_mempool_get(nv_cache->p2l_pool);
1585 :
1586 0 : if (!p2l_map->chunk_map) {
1587 0 : return -ENOMEM;
1588 : }
1589 :
1590 0 : if (ftl_chunk_alloc_md_entry(chunk)) {
1591 0 : ftl_mempool_put(nv_cache->p2l_pool, p2l_map->chunk_map);
1592 0 : p2l_map->chunk_map = NULL;
1593 0 : return -ENOMEM;
1594 : }
1595 :
1596 : /* Set the P2L to FTL_LBA_INVALID */
1597 0 : memset(p2l_map->chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
1598 :
1599 0 : return 0;
1600 : }
1601 :
1602 : int
1603 0 : ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)
1604 : {
1605 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1606 : struct ftl_nv_cache_chunk *chunk;
1607 : uint64_t chunks_number, offset, i;
1608 0 : int status = 0;
1609 : bool active;
1610 :
1611 0 : nv_cache->chunk_current = NULL;
1612 0 : TAILQ_INIT(&nv_cache->chunk_free_list);
1613 0 : TAILQ_INIT(&nv_cache->chunk_full_list);
1614 0 : TAILQ_INIT(&nv_cache->chunk_inactive_list);
1615 0 : nv_cache->chunk_full_count = 0;
1616 0 : nv_cache->chunk_free_count = 0;
1617 0 : nv_cache->chunk_inactive_count = 0;
1618 :
1619 0 : assert(nv_cache->chunk_open_count == 0);
1620 0 : offset = nvc_data_offset(nv_cache);
1621 0 : if (!nv_cache->chunks) {
1622 0 : FTL_ERRLOG(dev, "No NV cache metadata\n");
1623 0 : return -1;
1624 : }
1625 :
1626 0 : if (dev->sb->upgrade_ready) {
1627 : /*
1628 : * During upgrade some transitions are allowed:
1629 : *
1630 : * 1. FREE -> INACTIVE
1631 : * 2. INACTIVE -> FREE
1632 : */
1633 0 : chunk = nv_cache->chunks;
1634 0 : for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1635 0 : active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1636 :
1637 0 : if (chunk->md->state == FTL_CHUNK_STATE_FREE) {
1638 0 : if (!active) {
1639 0 : chunk->md->state = FTL_CHUNK_STATE_INACTIVE;
1640 : }
1641 0 : } else if (chunk->md->state == FTL_CHUNK_STATE_INACTIVE) {
1642 0 : if (active) {
1643 0 : chunk->md->state = FTL_CHUNK_STATE_FREE;
1644 : }
1645 : }
1646 : }
1647 : }
1648 :
1649 0 : chunk = nv_cache->chunks;
1650 0 : for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1651 0 : chunk->nv_cache = nv_cache;
1652 0 : nvc_validate_md(nv_cache, chunk->md);
1653 :
1654 0 : if (offset != chunk->offset) {
1655 0 : status = -EINVAL;
1656 0 : goto error;
1657 : }
1658 :
1659 0 : if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
1660 0 : status = -EINVAL;
1661 0 : goto error;
1662 : }
1663 :
1664 0 : active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
1665 0 : if (false == active) {
1666 0 : if (chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
1667 0 : status = -EINVAL;
1668 0 : goto error;
1669 : }
1670 : }
1671 :
1672 0 : switch (chunk->md->state) {
1673 0 : case FTL_CHUNK_STATE_FREE:
1674 0 : if (chunk->md->blocks_written || chunk->md->write_pointer) {
1675 0 : status = -EINVAL;
1676 0 : goto error;
1677 : }
 1678 : /* Chunk empty, move it onto the free list */
1679 0 : TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
1680 0 : nv_cache->chunk_free_count++;
1681 0 : break;
1682 0 : case FTL_CHUNK_STATE_OPEN:
 1683 : /* All chunks need to be closed at this point */
1684 0 : status = -EINVAL;
1685 0 : goto error;
1686 : break;
1687 0 : case FTL_CHUNK_STATE_CLOSED:
1688 0 : if (chunk->md->blocks_written != nv_cache->chunk_blocks) {
1689 0 : status = -EINVAL;
1690 0 : goto error;
1691 : }
 1692 : /* Chunk full, move it onto the full list */
1693 0 : TAILQ_INSERT_TAIL(&nv_cache->chunk_full_list, chunk, entry);
1694 0 : nv_cache->chunk_full_count++;
1695 0 : break;
1696 0 : case FTL_CHUNK_STATE_INACTIVE:
1697 0 : TAILQ_INSERT_TAIL(&nv_cache->chunk_inactive_list, chunk, entry);
1698 0 : nv_cache->chunk_inactive_count++;
1699 0 : break;
1700 0 : default:
1701 0 : status = -EINVAL;
1702 0 : FTL_ERRLOG(dev, "Invalid chunk state\n");
1703 0 : goto error;
1704 : }
1705 :
1706 0 : offset += nv_cache->chunk_blocks;
1707 : }
1708 :
1709 0 : chunks_number = nv_cache->chunk_free_count + nv_cache->chunk_full_count +
1710 0 : nv_cache->chunk_inactive_count;
1711 0 : assert(nv_cache->chunk_current == NULL);
1712 :
1713 0 : if (chunks_number != nv_cache->chunk_count) {
1714 0 : FTL_ERRLOG(dev, "Inconsistent NV cache metadata\n");
1715 0 : status = -EINVAL;
1716 0 : goto error;
1717 : }
1718 :
1719 0 : status = sort_chunks(nv_cache);
1720 0 : if (status) {
1721 0 : FTL_ERRLOG(dev, "FTL NV Cache: sorting chunks ERROR\n");
1722 : }
1723 :
1724 0 : FTL_NOTICELOG(dev, "FTL NV Cache: full chunks = %lu, empty chunks = %lu\n",
1725 : nv_cache->chunk_full_count, nv_cache->chunk_free_count);
1726 :
1727 0 : if (0 == status) {
1728 0 : FTL_NOTICELOG(dev, "FTL NV Cache: state loaded successfully\n");
1729 : } else {
1730 0 : FTL_ERRLOG(dev, "FTL NV Cache: loading state ERROR\n");
1731 : }
1732 :
1733 : /* The number of active/inactive chunks calculated at initialization can change at this point due to metadata
1734 : * upgrade. Recalculate the thresholds that depend on active chunk count.
1735 : */
1736 0 : ftl_nv_cache_init_update_limits(dev);
1737 0 : error:
1738 0 : return status;
1739 : }
1740 :
1741 : void
1742 0 : ftl_nv_cache_get_max_seq_id(struct ftl_nv_cache *nv_cache, uint64_t *open_seq_id,
1743 : uint64_t *close_seq_id)
1744 : {
1745 0 : uint64_t i, o_seq_id = 0, c_seq_id = 0;
1746 : struct ftl_nv_cache_chunk *chunk;
1747 :
1748 0 : chunk = nv_cache->chunks;
1749 0 : assert(chunk);
1750 :
1751 : /* Iterate over chunks and get their max open and close seq id */
1752 0 : for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
1753 0 : o_seq_id = spdk_max(o_seq_id, chunk->md->seq_id);
1754 0 : c_seq_id = spdk_max(c_seq_id, chunk->md->close_seq_id);
1755 : }
1756 :
1757 0 : *open_seq_id = o_seq_id;
1758 0 : *close_seq_id = c_seq_id;
1759 0 : }
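/*
 * Worked example (hypothetical values): for three chunks with md->seq_id =
 * {5, 9, 7} and md->close_seq_id = {4, 8, 6}, this returns *open_seq_id = 9
 * and *close_seq_id = 8, i.e. the maxima across all chunk metadata.
 */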
1760 :
1761 : typedef void (*ftl_chunk_ops_cb)(struct ftl_nv_cache_chunk *chunk, void *cntx, bool status);
1762 :
1763 : static void
1764 0 : write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1765 : {
1766 0 : struct ftl_basic_rq *brq = arg;
1767 0 : struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1768 :
1769 0 : ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1770 :
1771 0 : brq->success = success;
1772 0 : if (spdk_likely(success)) {
1773 0 : chunk_advance_blocks(chunk->nv_cache, chunk, brq->num_blocks);
1774 : }
1775 :
1776 0 : spdk_bdev_free_io(bdev_io);
1777 0 : brq->owner.cb(brq);
1778 0 : }
1779 :
1780 : static void
1781 0 : _ftl_chunk_basic_rq_write(void *_brq)
1782 : {
1783 0 : struct ftl_basic_rq *brq = _brq;
1784 0 : struct ftl_nv_cache *nv_cache = brq->io.chunk->nv_cache;
1785 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1786 : int rc;
1787 :
1788 0 : rc = ftl_nv_cache_bdev_write_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1789 : brq->io_payload, NULL, brq->io.addr,
1790 : brq->num_blocks, write_brq_end, brq);
1791 0 : if (spdk_unlikely(rc)) {
1792 0 : if (rc == -ENOMEM) {
1793 0 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
1794 0 : brq->io.bdev_io_wait.bdev = bdev;
1795 0 : brq->io.bdev_io_wait.cb_fn = _ftl_chunk_basic_rq_write;
1796 0 : brq->io.bdev_io_wait.cb_arg = brq;
1797 0 : spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, &brq->io.bdev_io_wait);
1798 : } else {
1799 0 : ftl_abort();
1800 : }
1801 : }
1802 0 : }
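/*
 * The -ENOMEM path above is the standard SPDK bdev retry idiom: when
 * submission fails for lack of bdev_io resources, queue an
 * spdk_bdev_io_wait_entry and resubmit from its callback. A minimal generic
 * sketch of the same idiom (my_submit, my_ctx and my_done_cb are hypothetical
 * names, not part of this driver):
 *
 *	static void my_submit(void *arg)
 *	{
 *		struct my_ctx *ctx = arg;
 *		int rc = spdk_bdev_write_blocks(ctx->desc, ctx->ioch, ctx->buf,
 *						ctx->offset, ctx->num_blocks,
 *						my_done_cb, ctx);
 *		if (rc == -ENOMEM) {
 *			ctx->wait.bdev = spdk_bdev_desc_get_bdev(ctx->desc);
 *			ctx->wait.cb_fn = my_submit;
 *			ctx->wait.cb_arg = ctx;
 *			spdk_bdev_queue_io_wait(ctx->wait.bdev, ctx->ioch, &ctx->wait);
 *		}
 *	}
 */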
1803 :
1804 : static void
1805 0 : ftl_chunk_basic_rq_write(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1806 : {
1807 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1808 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1809 :
1810 0 : brq->io.chunk = chunk;
1811 0 : brq->success = false;
1812 :
1813 0 : _ftl_chunk_basic_rq_write(brq);
1814 :
1815 0 : chunk->md->write_pointer += brq->num_blocks;
1816 0 : dev->stats.io_activity_total += brq->num_blocks;
1817 0 : }
1818 :
1819 : static void
1820 0 : read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
1821 : {
1822 0 : struct ftl_basic_rq *brq = arg;
1823 :
1824 0 : ftl_stats_bdev_io_completed(brq->dev, FTL_STATS_TYPE_MD_NV_CACHE, bdev_io);
1825 :
1826 0 : brq->success = success;
1827 :
1828 0 : brq->owner.cb(brq);
1829 0 : spdk_bdev_free_io(bdev_io);
1830 0 : }
1831 :
1832 : static int
1833 0 : ftl_chunk_basic_rq_read(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq)
1834 : {
1835 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
1836 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
1837 : int rc;
1838 :
1839 0 : brq->io.chunk = chunk;
1840 0 : brq->success = false;
1841 :
1842 0 : rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc, nv_cache->cache_ioch,
1843 : brq->io_payload, NULL, brq->io.addr, brq->num_blocks, read_brq_end, brq);
1844 :
1845 0 : if (spdk_likely(!rc)) {
1846 0 : dev->stats.io_activity_total += brq->num_blocks;
1847 : }
1848 :
1849 0 : return rc;
1850 : }
1851 :
1852 : static void
1853 0 : chunk_open_cb(int status, void *ctx)
1854 : {
1855 0 : struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1856 :
1857 0 : if (spdk_unlikely(status)) {
1858 : #ifdef SPDK_FTL_RETRY_ON_ERROR
1859 : ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1860 : return;
1861 : #else
1862 0 : ftl_abort();
1863 : #endif
1864 : }
1865 :
1866 0 : chunk->md->state = FTL_CHUNK_STATE_OPEN;
1867 0 : }
1868 :
1869 : static void
1870 0 : ftl_chunk_open(struct ftl_nv_cache_chunk *chunk)
1871 : {
1872 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1873 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1874 0 : struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
1875 0 : struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1876 :
1877 0 : if (chunk_alloc_p2l_map(chunk)) {
1878 0 : assert(0);
1879 : /*
1880 : * We control the number of open chunks, and it must be consistent with the size of
1881 : * the chunk P2L map pool
1882 : */
1883 : ftl_abort();
1884 : return;
1885 : }
1886 :
1887 0 : chunk->nv_cache->chunk_open_count++;
1888 :
1889 0 : assert(chunk->md->write_pointer == 0);
1890 0 : assert(chunk->md->blocks_written == 0);
1891 :
1892 0 : memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1893 0 : p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_OPEN;
1894 0 : p2l_map->chunk_dma_md->p2l_map_checksum = 0;
1895 :
1896 0 : ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md,
1897 : NULL, chunk_open_cb, chunk,
1898 : &chunk->md_persist_entry_ctx);
1899 : }
1900 :
1901 : static void
1902 0 : chunk_close_cb(int status, void *ctx)
1903 : {
1904 0 : struct ftl_nv_cache_chunk *chunk = (struct ftl_nv_cache_chunk *)ctx;
1905 :
1906 0 : assert(chunk->md->write_pointer == chunk->nv_cache->chunk_blocks);
1907 :
1908 0 : if (spdk_likely(!status)) {
1909 0 : chunk->md->p2l_map_checksum = chunk->p2l_map.chunk_dma_md->p2l_map_checksum;
1910 0 : chunk_free_p2l_map(chunk);
1911 :
1912 0 : assert(chunk->nv_cache->chunk_open_count > 0);
1913 0 : chunk->nv_cache->chunk_open_count--;
1914 :
1915 : /* Chunk is full, move it onto the full list */
1916 0 : TAILQ_INSERT_TAIL(&chunk->nv_cache->chunk_full_list, chunk, entry);
1917 0 : chunk->nv_cache->chunk_full_count++;
1918 :
1919 0 : chunk->nv_cache->last_seq_id = chunk->md->close_seq_id;
1920 :
1921 0 : chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1922 : } else {
1923 : #ifdef SPDK_FTL_RETRY_ON_ERROR
1924 : ftl_md_persist_entry_retry(&chunk->md_persist_entry_ctx);
1925 : #else
1926 0 : ftl_abort();
1927 : #endif
1928 : }
1929 0 : }
1930 :
1931 : static void
1932 0 : chunk_map_write_cb(struct ftl_basic_rq *brq)
1933 : {
1934 0 : struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
1935 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1936 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1937 0 : struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
1938 0 : struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
1939 : uint32_t chunk_map_crc;
1940 :
1941 0 : if (spdk_likely(brq->success)) {
1942 0 : chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
1943 0 : chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
1944 0 : memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
1945 0 : p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
1946 0 : p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
1947 0 : ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk->p2l_map.chunk_dma_md,
1948 : NULL, chunk_close_cb, chunk,
1949 : &chunk->md_persist_entry_ctx);
1950 : } else {
1951 : #ifdef SPDK_FTL_RETRY_ON_ERROR
1952 : /* retry */
1953 : chunk->md->write_pointer -= brq->num_blocks;
1954 : ftl_chunk_basic_rq_write(chunk, brq);
1955 : #else
1956 0 : ftl_abort();
1957 : #endif
1958 : }
1959 0 : }
1960 :
1961 : static void
1962 0 : ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
1963 : {
1964 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
1965 0 : struct ftl_basic_rq *brq = &chunk->metadata_rq;
1966 0 : void *metadata = chunk->p2l_map.chunk_map;
1967 :
1968 0 : chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
1969 0 : ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
1970 0 : ftl_basic_rq_set_owner(brq, chunk_map_write_cb, chunk);
1971 :
1972 0 : assert(chunk->md->write_pointer == chunk_tail_md_offset(chunk->nv_cache));
1973 0 : brq->io.addr = chunk->offset + chunk->md->write_pointer;
1974 :
1975 0 : ftl_chunk_basic_rq_write(chunk, brq);
1976 0 : }
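/*
 * In outline, the close chain started here is: ftl_chunk_close() writes the
 * in-memory P2L map as the chunk's tail md, chunk_map_write_cb() then persists
 * the NVC_MD entry with state CLOSED and the map checksum, and chunk_close_cb()
 * finally moves the chunk onto the full list.
 */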
1977 :
1978 : static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
1979 : void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx);
1980 : static void read_tail_md_cb(struct ftl_basic_rq *brq);
1981 : static void recover_open_chunk_cb(struct ftl_basic_rq *brq);
1982 :
1983 : static void
1984 0 : restore_chunk_close_cb(int status, void *ctx)
1985 : {
1986 0 : struct ftl_basic_rq *parent = (struct ftl_basic_rq *)ctx;
1987 0 : struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
1988 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
1989 :
1990 0 : if (spdk_unlikely(status)) {
1991 0 : parent->success = false;
1992 : } else {
1993 0 : chunk->md->p2l_map_checksum = p2l_map->chunk_dma_md->p2l_map_checksum;
1994 0 : chunk->md->state = FTL_CHUNK_STATE_CLOSED;
1995 : }
1996 :
1997 0 : read_tail_md_cb(parent);
1998 0 : }
1999 :
2000 : static void
2001 0 : restore_fill_p2l_map_cb(struct ftl_basic_rq *parent)
2002 : {
2003 0 : struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
2004 0 : struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
2005 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2006 0 : struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2007 0 : struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
2008 : uint32_t chunk_map_crc;
2009 :
2010 : /* Set original callback */
2011 0 : ftl_basic_rq_set_owner(parent, recover_open_chunk_cb, parent->owner.priv);
2012 :
2013 0 : if (spdk_unlikely(!parent->success)) {
2014 0 : read_tail_md_cb(parent);
2015 0 : return;
2016 : }
2017 :
2018 0 : chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
2019 0 : chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
2020 0 : memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
2021 0 : p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
2022 0 : p2l_map->chunk_dma_md->write_pointer = chunk->nv_cache->chunk_blocks;
2023 0 : p2l_map->chunk_dma_md->blocks_written = chunk->nv_cache->chunk_blocks;
2024 0 : p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;
2025 :
2026 0 : ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
2027 : restore_chunk_close_cb, parent, &chunk->md_persist_entry_ctx);
2028 : }
2029 :
2030 : static void
2031 0 : restore_fill_tail_md(struct ftl_basic_rq *parent, struct ftl_nv_cache_chunk *chunk)
2032 : {
2033 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2034 : void *metadata;
2035 :
2036 0 : chunk->md->close_seq_id = ftl_get_next_seq_id(dev);
2037 :
2038 0 : metadata = chunk->p2l_map.chunk_map;
2039 0 : ftl_basic_rq_init(dev, parent, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2040 0 : ftl_basic_rq_set_owner(parent, restore_fill_p2l_map_cb, parent->owner.priv);
2041 :
2042 0 : parent->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2043 0 : parent->io.chunk = chunk;
2044 :
2045 0 : ftl_chunk_basic_rq_write(chunk, parent);
2046 0 : }
2047 :
2048 : static void
2049 0 : read_open_chunk_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2050 : {
2051 0 : struct ftl_rq *rq = (struct ftl_rq *)cb_arg;
2052 0 : struct ftl_basic_rq *parent = (struct ftl_basic_rq *)rq->owner.priv;
2053 0 : struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
2054 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
2055 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2056 : union ftl_md_vss *md;
2057 0 : uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
2058 0 : uint64_t len = bdev_io->u.bdev.num_blocks;
2059 0 : ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);
2060 : int rc;
2061 :
2062 0 : ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);
2063 :
2064 0 : spdk_bdev_free_io(bdev_io);
2065 :
2066 0 : if (!success) {
2067 0 : parent->success = false;
2068 0 : read_tail_md_cb(parent);
2069 0 : return;
2070 : }
2071 :
2072 0 : while (rq->iter.idx < rq->iter.count) {
2073 : /* Get metadata */
2074 0 : md = rq->entries[rq->iter.idx].io_md;
2075 0 : if (md->nv_cache.seq_id != chunk->md->seq_id) {
2076 0 : md->nv_cache.lba = FTL_LBA_INVALID;
2077 : }
2078 : /*
2079 : * The P2L map contains effectively random data at this point (it holds arbitrary
2080 : * blocks from a potentially not fully written tail md), so even LBA_INVALID needs to be set explicitly
2081 : */
2082 :
2083 0 : ftl_chunk_set_addr(chunk, md->nv_cache.lba, addr + rq->iter.idx);
2084 0 : rq->iter.idx++;
2085 : }
2086 :
2087 0 : if (cache_offset + len < chunk->offset + chunk_tail_md_offset(nv_cache)) {
2088 0 : cache_offset += len;
2089 0 : len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - cache_offset);
2090 0 : rq->iter.idx = 0;
2091 0 : rq->iter.count = len;
2092 :
2093 0 : rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
2094 : nv_cache->cache_ioch,
2095 : rq->io_payload,
2096 : rq->io_md,
2097 : cache_offset, len,
2098 : read_open_chunk_cb,
2099 : rq);
2100 :
2101 0 : if (rc) {
2102 0 : ftl_rq_del(rq);
2103 0 : parent->success = false;
2104 0 : read_tail_md_cb(parent);
2105 0 : return;
2106 : }
2107 : } else {
2108 0 : ftl_rq_del(rq);
2109 0 : restore_fill_tail_md(parent, chunk);
2110 : }
2111 : }
2112 :
2113 : static void
2114 0 : restore_open_chunk(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *parent)
2115 : {
2116 0 : struct ftl_nv_cache *nv_cache = chunk->nv_cache;
2117 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
2118 : struct ftl_rq *rq;
2119 : uint64_t addr;
2120 0 : uint64_t len = dev->xfer_size;
2121 : int rc;
2122 :
2123 : /*
2124 : * Prefill the P2L map with INVALID LBAs before rebuilding it from user data.
2125 : * TODO: this is needed because the tail md blocks (the P2L map itself) are also represented in the P2L map, instead of just the user data region
2126 : */
2127 0 : memset(chunk->p2l_map.chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);
2128 :
2129 : /* Need to read user data, recalculate chunk's P2L and write tail md with it */
2130 0 : rq = ftl_rq_new(dev, dev->nv_cache.md_size);
2131 0 : if (!rq) {
2132 0 : parent->success = false;
2133 0 : read_tail_md_cb(parent);
2134 0 : return;
2135 : }
2136 :
2137 0 : rq->owner.priv = parent;
2138 0 : rq->iter.idx = 0;
2139 0 : rq->iter.count = len;
2140 :
2141 0 : addr = chunk->offset;
2142 :
2143 0 : len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - addr);
2144 :
2145 0 : rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
2146 : nv_cache->cache_ioch,
2147 : rq->io_payload,
2148 : rq->io_md,
2149 : addr, len,
2150 : read_open_chunk_cb,
2151 : rq);
2152 :
2153 0 : if (rc) {
2154 0 : ftl_rq_del(rq);
2155 0 : parent->success = false;
2156 0 : read_tail_md_cb(parent);
2157 : }
2158 : }
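/*
 * In outline, the open-chunk recovery chain driven from here is:
 * restore_open_chunk() -> read_open_chunk_cb() (looped in xfer_size batches
 * over the user data region) -> restore_fill_tail_md() ->
 * restore_fill_p2l_map_cb() -> restore_chunk_close_cb() -> read_tail_md_cb(),
 * which finally invokes the owner callback (recover_open_chunk_cb).
 */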
2159 :
2160 : static void
2161 0 : read_tail_md_cb(struct ftl_basic_rq *brq)
2162 : {
2163 0 : brq->owner.cb(brq);
2164 0 : }
2165 :
2166 : static int
2167 0 : ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
2168 : void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx)
2169 : {
2170 0 : struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
2171 : void *metadata;
2172 : int rc;
2173 :
2174 0 : metadata = chunk->p2l_map.chunk_map;
2175 0 : ftl_basic_rq_init(dev, brq, metadata, chunk->nv_cache->tail_md_chunk_blocks);
2176 0 : ftl_basic_rq_set_owner(brq, cb, cb_ctx);
2177 :
2178 0 : brq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
2179 0 : rc = ftl_chunk_basic_rq_read(chunk, brq);
2180 :
2181 0 : return rc;
2182 : }
2183 :
2184 : struct restore_chunk_md_ctx {
2185 : ftl_chunk_md_cb cb;
2186 : void *cb_ctx;
2187 : int status;
2188 : uint64_t qd;
2189 : uint64_t id;
2190 : };
2191 :
2192 : static inline bool
2193 0 : is_chunk_count_valid(struct ftl_nv_cache *nv_cache)
2194 : {
2195 0 : uint64_t chunk_count = 0;
2196 :
2197 0 : chunk_count += nv_cache->chunk_open_count;
2198 0 : chunk_count += nv_cache->chunk_free_count;
2199 0 : chunk_count += nv_cache->chunk_full_count;
2200 0 : chunk_count += nv_cache->chunk_comp_count;
2201 0 : chunk_count += nv_cache->chunk_inactive_count;
2202 :
2203 0 : return chunk_count == nv_cache->chunk_count;
2204 : }
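/*
 * Example (hypothetical counts): with chunk_count = 8, the check passes for
 * open = 1, free = 3, full = 2, comp = 1, inactive = 1 (1 + 3 + 2 + 1 + 1 == 8)
 * and fails if a chunk was dropped from, or double-counted on, the lists.
 */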
2205 :
2206 : static void
2207 0 : walk_tail_md_cb(struct ftl_basic_rq *brq)
2208 : {
2209 0 : struct ftl_mngt_process *mngt = brq->owner.priv;
2210 0 : struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2211 0 : struct restore_chunk_md_ctx *ctx = ftl_mngt_get_step_ctx(mngt);
2212 0 : int rc = 0;
2213 :
2214 0 : if (brq->success) {
2215 0 : rc = ctx->cb(chunk, ctx->cb_ctx);
2216 : } else {
2217 0 : rc = -EIO;
2218 : }
2219 :
2220 0 : if (rc) {
2221 0 : ctx->status = rc;
2222 : }
2223 0 : ctx->qd--;
2224 0 : chunk_free_p2l_map(chunk);
2225 0 : ftl_mngt_continue_step(mngt);
2226 0 : }
2227 :
2228 : static void
2229 0 : ftl_mngt_nv_cache_walk_tail_md(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2230 : uint64_t seq_id, ftl_chunk_md_cb cb, void *cb_ctx)
2231 : {
2232 0 : struct ftl_nv_cache *nvc = &dev->nv_cache;
2233 : struct restore_chunk_md_ctx *ctx;
2234 :
2235 0 : ctx = ftl_mngt_get_step_ctx(mngt);
2236 0 : if (!ctx) {
2237 0 : if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*ctx))) {
2238 0 : ftl_mngt_fail_step(mngt);
2239 0 : return;
2240 : }
2241 0 : ctx = ftl_mngt_get_step_ctx(mngt);
2242 0 : assert(ctx);
2243 :
2244 0 : ctx->cb = cb;
2245 0 : ctx->cb_ctx = cb_ctx;
2246 : }
2247 :
2248 : /*
2249 : * This function generates a high queue depth and will utilize ftl_mngt_continue_step during completions to make sure all chunks
2250 : * are processed before returning an error (if any were found) or continuing on.
2251 : */
2252 0 : if (0 == ctx->qd && ctx->id == nvc->chunk_count) {
2253 0 : if (!is_chunk_count_valid(nvc)) {
2254 0 : FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2255 0 : assert(false);
2256 : ctx->status = -EINVAL;
2257 : }
2258 :
2259 0 : if (ctx->status) {
2260 0 : ftl_mngt_fail_step(mngt);
2261 : } else {
2262 0 : ftl_mngt_next_step(mngt);
2263 : }
2264 0 : return;
2265 : }
2266 :
2267 0 : while (ctx->id < nvc->chunk_count) {
2268 0 : struct ftl_nv_cache_chunk *chunk = &nvc->chunks[ctx->id];
2269 : int rc;
2270 :
2271 0 : if (!chunk->recovery) {
2272 : /* This chunk is inactive or empty and not used in recovery */
2273 0 : ctx->id++;
2274 0 : continue;
2275 : }
2276 :
2277 0 : if (seq_id && (chunk->md->close_seq_id <= seq_id)) {
2278 0 : ctx->id++;
2279 0 : continue;
2280 : }
2281 :
2282 0 : if (chunk_alloc_p2l_map(chunk)) {
2283 : /* No more free P2L maps, break and continue later */
2284 0 : break;
2285 : }
2286 0 : ctx->id++;
2287 :
2288 0 : rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, walk_tail_md_cb, mngt);
2289 :
2290 0 : if (0 == rc) {
2291 0 : ctx->qd++;
2292 : } else {
2293 0 : chunk_free_p2l_map(chunk);
2294 0 : ctx->status = rc;
2295 : }
2296 : }
2297 :
2298 0 : if (0 == ctx->qd) {
2299 : /*
2300 : * A QD of zero can happen if all leftover chunks are in the free state.
2301 : * Additionally, ftl_chunk_read_tail_md could fail starting with the first IO in a given batch.
2302 : * To streamline all potential error handling (since many chunks read their P2L at the same time),
2303 : * we use ftl_mngt_continue_step to arrive at the same mngt step-end check (see the beginning of this function).
2304 : */
2305 0 : ftl_mngt_continue_step(mngt);
2306 : }
2307 :
2308 : }
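/*
 * Caller-side sketch (restore_cb is a hypothetical name; the callback
 * signature is assumed from its use in walk_tail_md_cb() above):
 *
 *	static int restore_cb(struct ftl_nv_cache_chunk *chunk, void *cb_ctx)
 *	{
 *		... consume chunk->p2l_map.chunk_map; a non-zero return is
 *		... latched into ctx->status and fails the whole step
 *		return 0;
 *	}
 *
 *	ftl_mngt_nv_cache_walk_tail_md(dev, mngt, 0, restore_cb, NULL);
 */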
2309 :
2310 : void
2311 0 : ftl_mngt_nv_cache_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
2312 : ftl_chunk_md_cb cb, void *cb_ctx)
2313 : {
2314 0 : ftl_mngt_nv_cache_walk_tail_md(dev, mngt, dev->sb->ckpt_seq_id, cb, cb_ctx);
2315 0 : }
2316 :
2317 : static void
2318 0 : restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
2319 : {
2320 0 : struct ftl_mngt_process *mngt = md->owner.cb_ctx;
2321 0 : struct ftl_nv_cache *nvc = &dev->nv_cache;
2322 : struct ftl_nv_cache_chunk *chunk;
2323 : uint64_t i;
2324 :
2325 0 : if (status) {
2326 : /* Restore error, end step */
2327 0 : ftl_mngt_fail_step(mngt);
2328 0 : return;
2329 : }
2330 :
2331 0 : for (i = 0; i < nvc->chunk_count; i++) {
2332 0 : chunk = &nvc->chunks[i];
2333 :
2334 0 : if (false == nvc->nvc_type->ops.is_chunk_active(dev, chunk->offset) &&
2335 0 : chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
2336 0 : status = -EINVAL;
2337 0 : break;
2338 : }
2339 :
2340 0 : if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
2341 0 : status = -EINVAL;
2342 0 : break;
2343 : }
2344 :
2345 0 : switch (chunk->md->state) {
2346 0 : case FTL_CHUNK_STATE_FREE:
2347 0 : break;
2348 0 : case FTL_CHUNK_STATE_OPEN:
2349 0 : TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2350 0 : nvc->chunk_free_count--;
2351 :
2352 0 : TAILQ_INSERT_TAIL(&nvc->chunk_open_list, chunk, entry);
2353 0 : nvc->chunk_open_count++;
2354 :
2355 : /* Chunk is not empty, mark it to be recovered */
2356 0 : chunk->recovery = true;
2357 0 : break;
2358 0 : case FTL_CHUNK_STATE_CLOSED:
2359 0 : TAILQ_REMOVE(&nvc->chunk_free_list, chunk, entry);
2360 0 : nvc->chunk_free_count--;
2361 :
2362 0 : TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2363 0 : nvc->chunk_full_count++;
2364 :
2365 : /* Chunk is not empty, mark it to be recovered */
2366 0 : chunk->recovery = true;
2367 0 : break;
2368 0 : case FTL_CHUNK_STATE_INACTIVE:
2369 0 : break;
2370 0 : default:
2371 0 : status = -EINVAL;
2372 : }
2373 : }
2374 :
2375 0 : if (status) {
2376 0 : ftl_mngt_fail_step(mngt);
2377 : } else {
2378 0 : ftl_mngt_next_step(mngt);
2379 : }
2380 : }
2381 :
2382 : void
2383 0 : ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2384 : {
2385 0 : struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
2386 :
2387 0 : md->owner.cb_ctx = mngt;
2388 0 : md->cb = restore_chunk_state_cb;
2389 0 : ftl_md_restore(md);
2390 0 : }
2391 :
2392 : static void
2393 0 : recover_open_chunk_cb(struct ftl_basic_rq *brq)
2394 : {
2395 0 : struct ftl_mngt_process *mngt = brq->owner.priv;
2396 0 : struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
2397 0 : struct ftl_nv_cache *nvc = chunk->nv_cache;
2398 0 : struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
2399 :
2400 0 : chunk_free_p2l_map(chunk);
2401 :
2402 0 : if (!brq->success) {
2403 0 : FTL_ERRLOG(dev, "Chunk recovery ERROR, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2404 : chunk->md->seq_id);
2405 0 : ftl_mngt_fail_step(mngt);
2406 0 : return;
2407 : }
2408 :
2409 0 : FTL_NOTICELOG(dev, "Recovered chunk, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
2410 : chunk->md->seq_id);
2411 :
2412 0 : TAILQ_REMOVE(&nvc->chunk_open_list, chunk, entry);
2413 0 : nvc->chunk_open_count--;
2414 :
2415 0 : TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
2416 0 : nvc->chunk_full_count++;
2417 :
2418 : /* This is now a closed chunk */
2419 0 : chunk->md->write_pointer = nvc->chunk_blocks;
2420 0 : chunk->md->blocks_written = nvc->chunk_blocks;
2421 :
2422 0 : ftl_mngt_continue_step(mngt);
2423 : }
2424 :
2425 : void
2426 0 : ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
2427 : {
2428 0 : struct ftl_nv_cache *nvc = &dev->nv_cache;
2429 : struct ftl_nv_cache_chunk *chunk;
2430 0 : struct ftl_basic_rq *brq = ftl_mngt_get_step_ctx(mngt);
2431 :
2432 0 : if (!brq) {
2433 0 : if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2434 0 : FTL_NOTICELOG(dev, "No open chunks to recover P2L\n");
2435 0 : ftl_mngt_next_step(mngt);
2436 0 : return;
2437 : }
2438 :
2439 0 : if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*brq))) {
2440 0 : ftl_mngt_fail_step(mngt);
2441 0 : return;
2442 : }
2443 0 : brq = ftl_mngt_get_step_ctx(mngt);
2444 0 : ftl_basic_rq_set_owner(brq, recover_open_chunk_cb, mngt);
2445 : }
2446 :
2447 0 : if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
2448 0 : if (!is_chunk_count_valid(nvc)) {
2449 0 : FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunks\n");
2450 0 : ftl_mngt_fail_step(mngt);
2451 0 : return;
2452 : }
2453 :
2454 : /*
2455 : * Now that all chunks are loaded and closed, do the final step of
2456 : * restoring the chunk state
2457 : */
2458 0 : if (ftl_nv_cache_load_state(nvc)) {
2459 0 : ftl_mngt_fail_step(mngt);
2460 : } else {
2461 0 : ftl_mngt_next_step(mngt);
2462 : }
2463 : } else {
2464 0 : chunk = TAILQ_FIRST(&nvc->chunk_open_list);
2465 0 : if (chunk_alloc_p2l_map(chunk)) {
2466 0 : ftl_mngt_fail_step(mngt);
2467 0 : return;
2468 : }
2469 :
2470 0 : brq->io.chunk = chunk;
2471 :
2472 0 : FTL_NOTICELOG(dev, "Starting open chunk recovery, offset = %"PRIu64", seq id %"PRIu64"\n",
2473 : chunk->offset, chunk->md->seq_id);
2474 0 : restore_open_chunk(chunk, brq);
2475 : }
2476 : }
2477 :
2478 : int
2479 0 : ftl_nv_cache_chunks_busy(struct ftl_nv_cache *nv_cache)
2480 : {
2481 : /* chunk_current migrates to the closed state when closing; any others should already have
2482 : * been moved to the free chunk list. We also need to wait for outstanding free md requests */
2483 0 : return nv_cache->chunk_open_count == 0 && nv_cache->chunk_free_persist_count == 0;
2484 : }
2485 :
2486 : void
2487 0 : ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)
2488 : {
2489 : struct ftl_nv_cache_chunk *chunk;
2490 : uint64_t free_space;
2491 :
2492 0 : nv_cache->halt = true;
2493 :
2494 : /* Set chunks on the open list back to the free state, since no user data has been written to them */
2495 0 : while (!TAILQ_EMPTY(&nv_cache->chunk_open_list)) {
2496 0 : chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2497 :
2498 : /* Chunks are moved between lists on metadata update submission, but their state is changed
2499 : * on completion. Break early in such a case to make sure all the necessary resources
2500 : * will be freed (during the next pass(es) of ftl_nv_cache_halt).
2501 : */
2502 0 : if (chunk->md->state != FTL_CHUNK_STATE_OPEN) {
2503 0 : break;
2504 : }
2505 :
2506 0 : TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
2507 0 : chunk_free_p2l_map(chunk);
2508 0 : ftl_nv_cache_chunk_md_initialize(chunk->md);
2509 0 : assert(nv_cache->chunk_open_count > 0);
2510 0 : nv_cache->chunk_open_count--;
2511 : }
2512 :
2513 : /* Close the current chunk by skipping all unwritten blocks */
2514 0 : chunk = nv_cache->chunk_current;
2515 0 : if (chunk != NULL) {
2516 0 : nv_cache->chunk_current = NULL;
2517 0 : if (chunk_is_closed(chunk)) {
2518 0 : return;
2519 : }
2520 :
2521 0 : free_space = chunk_get_free_space(nv_cache, chunk);
2522 0 : chunk->md->blocks_skipped = free_space;
2523 0 : chunk->md->blocks_written += free_space;
2524 0 : chunk->md->write_pointer += free_space;
2525 0 : ftl_chunk_close(chunk);
2526 : }
2527 : }
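/*
 * Worked example (hypothetical sizes, assuming chunk_get_free_space() returns
 * the blocks left up to the tail md offset): with chunk_blocks = 1024, a tail
 * md offset of 1000 and a current chunk halted at write_pointer = 700,
 * free_space = 300; blocks_skipped becomes 300, blocks_written and
 * write_pointer advance to 1000, and ftl_chunk_close() then writes the tail md
 * at the write pointer.
 */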
2528 :
2529 : uint64_t
2530 0 : ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache)
2531 : {
2532 0 : struct ftl_nv_cache_chunk *chunk = nv_cache->chunk_current;
2533 : uint64_t seq_id, free_space;
2534 :
2535 0 : if (!chunk) {
2536 0 : chunk = TAILQ_FIRST(&nv_cache->chunk_open_list);
2537 0 : if (chunk && chunk->md->state == FTL_CHUNK_STATE_OPEN) {
2538 0 : return chunk->md->seq_id;
2539 : } else {
2540 0 : return 0;
2541 : }
2542 : }
2543 :
2544 0 : if (chunk_is_closed(chunk)) {
2545 0 : return 0;
2546 : }
2547 :
2548 0 : seq_id = nv_cache->chunk_current->md->seq_id;
2549 0 : free_space = chunk_get_free_space(nv_cache, chunk);
2550 :
2551 0 : chunk->md->blocks_skipped = free_space;
2552 0 : chunk->md->blocks_written += free_space;
2553 0 : chunk->md->write_pointer += free_space;
2554 0 : if (chunk->md->blocks_written == chunk_tail_md_offset(nv_cache)) {
2555 0 : ftl_chunk_close(chunk);
2556 : }
2557 0 : nv_cache->chunk_current = NULL;
2558 :
2559 0 : seq_id++;
2560 0 : return seq_id;
2561 : }
2562 :
2563 : static double
2564 0 : ftl_nv_cache_get_chunk_utilization(struct ftl_nv_cache *nv_cache,
2565 : struct ftl_nv_cache_chunk *chunk)
2566 : {
2567 0 : double capacity = nv_cache->chunk_blocks;
2568 0 : double used = chunk->md->blocks_written + chunk->md->blocks_skipped;
2569 :
2570 0 : return used / capacity;
2571 : }
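/*
 * E.g. (hypothetical values): blocks_written = 700, blocks_skipped = 300 and
 * chunk_blocks = 1024 gives (700 + 300) / 1024 ~= 0.977.
 */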
2572 :
2573 : static const char *
2574 0 : ftl_nv_cache_get_chunk_state_name(struct ftl_nv_cache_chunk *chunk)
2575 : {
2576 : static const char *names[] = {
2577 : "FREE", "OPEN", "CLOSED", "INACTIVE"
2578 : };
2579 :
2580 0 : assert(chunk->md->state < SPDK_COUNTOF(names));
2581 0 : if (chunk->md->state < SPDK_COUNTOF(names)) {
2582 0 : return names[chunk->md->state];
2583 : } else {
2584 0 : assert(false);
2585 : return "?";
2586 : }
2587 : }
2588 :
2589 : static void
2590 0 : ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev, const struct ftl_property *property,
2591 : struct spdk_json_write_ctx *w)
2592 : {
2593 : uint64_t i;
2594 : struct ftl_nv_cache_chunk *chunk;
2595 :
2596 0 : spdk_json_write_named_string(w, "type", dev->nv_cache.nvc_type->name);
2597 0 : spdk_json_write_named_array_begin(w, "chunks");
2598 0 : for (i = 0, chunk = dev->nv_cache.chunks; i < dev->nv_cache.chunk_count; i++, chunk++) {
2599 0 : spdk_json_write_object_begin(w);
2600 0 : spdk_json_write_named_uint64(w, "id", i);
2601 0 : spdk_json_write_named_string(w, "state", ftl_nv_cache_get_chunk_state_name(chunk));
2602 0 : spdk_json_write_named_double(w, "utilization",
2603 : ftl_nv_cache_get_chunk_utilization(&dev->nv_cache, chunk));
2604 0 : spdk_json_write_object_end(w);
2605 : }
2606 0 : spdk_json_write_array_end(w);
2607 0 : }
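/*
 * The emitted JSON fragment looks roughly like this (values hypothetical):
 *
 *	"type": "...",
 *	"chunks": [
 *		{ "id": 0, "state": "CLOSED", "utilization": 1.0 },
 *		{ "id": 1, "state": "OPEN", "utilization": 0.25 }
 *	]
 */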
2608 :
2609 : void
2610 0 : ftl_nv_cache_chunk_md_initialize(struct ftl_nv_cache_chunk_md *md)
2611 : {
2612 0 : memset(md, 0, sizeof(*md));
2613 0 : md->version = FTL_NVC_VERSION_CURRENT;
2614 0 : }
|