Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2022 Intel Corporation.
3 : * All rights reserved.
4 : */
5 :
6 : #include "spdk/stdinc.h"
7 : #include "spdk/queue.h"
8 : #include "spdk/bdev_module.h"
9 :
10 : #include "ftl_core.h"
11 : #include "ftl_band.h"
12 : #include "ftl_internal.h"
13 :
/*
 * Completion callback for a band data write issued via ftl_band_rq_write().
 * On success the request continues through P2L checkpointing (the owner's
 * callback is invoked later in that path); on failure the behavior depends
 * on SPDK_FTL_RETRY_ON_ERROR.
 */
static void
write_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq *rq = arg;
	struct spdk_ftl_dev *dev = rq->dev;

	/* Attribute the completion to compaction or GC stats based on the rq owner */
	ftl_stats_bdev_io_completed(dev, rq->owner.compaction ? FTL_STATS_TYPE_CMP : FTL_STATS_TYPE_GC,
				    bdev_io);
	spdk_bdev_free_io(bdev_io);

	rq->success = success;
	if (spdk_likely(rq->success)) {
		/* Continue the write pipeline with a P2L checkpoint update */
		ftl_p2l_ckpt_issue(rq);
	} else {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* Drop this IO from the band's outstanding count and let the owner retry */
		assert(rq->io.band->queue_depth > 0);
		rq->io.band->queue_depth--;
		rq->owner.cb(rq);

#else
		/* Without retry support a failed band write is fatal */
		ftl_abort();
#endif
	}
}
38 :
39 : static void
40 0 : ftl_band_rq_bdev_write(void *_rq)
41 : {
42 0 : struct ftl_rq *rq = _rq;
43 0 : struct ftl_band *band = rq->io.band;
44 0 : struct spdk_ftl_dev *dev = band->dev;
45 : int rc;
46 :
47 0 : rc = spdk_bdev_write_blocks(dev->base_bdev_desc, dev->base_ioch,
48 : rq->io_payload, rq->io.addr, rq->num_blocks,
49 : write_rq_end, rq);
50 :
51 0 : if (spdk_unlikely(rc)) {
52 0 : if (rc == -ENOMEM) {
53 0 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
54 0 : rq->io.bdev_io_wait.bdev = bdev;
55 0 : rq->io.bdev_io_wait.cb_fn = ftl_band_rq_bdev_write;
56 0 : rq->io.bdev_io_wait.cb_arg = rq;
57 0 : spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &rq->io.bdev_io_wait);
58 : } else {
59 0 : ftl_abort();
60 : }
61 : }
62 0 : }
63 :
64 : void
65 0 : ftl_band_rq_write(struct ftl_band *band, struct ftl_rq *rq)
66 : {
67 0 : struct spdk_ftl_dev *dev = band->dev;
68 :
69 0 : rq->success = false;
70 0 : rq->io.band = band;
71 0 : rq->io.addr = band->md->iter.addr;
72 :
73 0 : ftl_band_rq_bdev_write(rq);
74 :
75 0 : band->queue_depth++;
76 0 : dev->stats.io_activity_total += rq->num_blocks;
77 :
78 0 : ftl_band_iter_advance(band, rq->num_blocks);
79 0 : if (ftl_band_filled(band, band->md->iter.offset)) {
80 0 : ftl_band_set_state(band, FTL_BAND_STATE_FULL);
81 0 : band->owner.state_change_fn(band);
82 : }
83 0 : }
84 :
85 : static void ftl_band_rq_bdev_read(void *_entry);
86 :
/*
 * Completion callback for a per-entry band read issued via ftl_band_rq_read().
 * Failed reads are resubmitted indefinitely; only a successful completion
 * drops the band's queue depth and invokes the rq owner's callback.
 */
static void
read_rq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct ftl_rq_entry *entry = arg;
	struct ftl_band *band = entry->io.band;
	struct ftl_rq *rq = ftl_rq_from_entry(entry);

	ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_GC, bdev_io);

	rq->success = success;
	if (spdk_unlikely(!success)) {
		/* Retry the same entry; the old bdev_io is released after resubmission */
		ftl_band_rq_bdev_read(entry);
		spdk_bdev_free_io(bdev_io);
		return;
	}

	/* queue_depth was taken in ftl_band_rq_read(); release it on success only */
	assert(band->queue_depth > 0);
	band->queue_depth--;

	rq->owner.cb(rq);
	spdk_bdev_free_io(bdev_io);
}
109 :
110 : static void
111 0 : ftl_band_rq_bdev_read(void *_entry)
112 : {
113 0 : struct ftl_rq_entry *entry = _entry;
114 0 : struct ftl_rq *rq = ftl_rq_from_entry(entry);
115 0 : struct spdk_ftl_dev *dev = rq->dev;
116 : int rc;
117 :
118 0 : rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch, entry->io_payload,
119 : entry->bdev_io.offset_blocks, entry->bdev_io.num_blocks,
120 : read_rq_end, entry);
121 0 : if (spdk_unlikely(rc)) {
122 0 : if (rc == -ENOMEM) {
123 0 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
124 0 : entry->bdev_io.wait_entry.bdev = bdev;
125 0 : entry->bdev_io.wait_entry.cb_fn = ftl_band_rq_bdev_read;
126 0 : entry->bdev_io.wait_entry.cb_arg = entry;
127 0 : spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &entry->bdev_io.wait_entry);
128 : } else {
129 0 : ftl_abort();
130 : }
131 : }
132 0 : }
133 :
134 : void
135 0 : ftl_band_rq_read(struct ftl_band *band, struct ftl_rq *rq)
136 : {
137 0 : struct spdk_ftl_dev *dev = band->dev;
138 0 : struct ftl_rq_entry *entry = &rq->entries[rq->iter.idx];
139 :
140 0 : assert(rq->iter.idx + rq->iter.count <= rq->num_blocks);
141 :
142 0 : rq->success = false;
143 0 : rq->io.band = band;
144 0 : rq->io.addr = band->md->iter.addr;
145 0 : entry->io.band = band;
146 0 : entry->bdev_io.offset_blocks = rq->io.addr;
147 0 : entry->bdev_io.num_blocks = rq->iter.count;
148 :
149 0 : ftl_band_rq_bdev_read(entry);
150 :
151 0 : dev->stats.io_activity_total += rq->num_blocks;
152 0 : band->queue_depth++;
153 0 : }
154 :
155 : static void
156 0 : write_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
157 : {
158 0 : struct ftl_basic_rq *brq = arg;
159 0 : struct ftl_band *band = brq->io.band;
160 :
161 0 : ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_MD_BASE, bdev_io);
162 :
163 0 : brq->success = success;
164 :
165 0 : assert(band->queue_depth > 0);
166 0 : band->queue_depth--;
167 :
168 0 : brq->owner.cb(brq);
169 0 : spdk_bdev_free_io(bdev_io);
170 0 : }
171 :
172 : static void
173 0 : ftl_band_brq_bdev_write(void *_brq)
174 : {
175 0 : struct ftl_basic_rq *brq = _brq;
176 0 : struct spdk_ftl_dev *dev = brq->dev;
177 : int rc;
178 :
179 0 : rc = spdk_bdev_write_blocks(dev->base_bdev_desc, dev->base_ioch,
180 : brq->io_payload, brq->io.addr,
181 : brq->num_blocks, write_brq_end, brq);
182 :
183 0 : if (spdk_unlikely(rc)) {
184 0 : if (rc == -ENOMEM) {
185 0 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
186 0 : brq->io.bdev_io_wait.bdev = bdev;
187 0 : brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_write;
188 0 : brq->io.bdev_io_wait.cb_arg = brq;
189 0 : spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
190 : } else {
191 0 : ftl_abort();
192 : }
193 : }
194 0 : }
195 :
196 : void
197 0 : ftl_band_basic_rq_write(struct ftl_band *band, struct ftl_basic_rq *brq)
198 : {
199 0 : struct spdk_ftl_dev *dev = band->dev;
200 :
201 0 : brq->io.addr = band->md->iter.addr;
202 0 : brq->io.band = band;
203 0 : brq->success = false;
204 :
205 0 : ftl_band_brq_bdev_write(brq);
206 :
207 0 : dev->stats.io_activity_total += brq->num_blocks;
208 0 : band->queue_depth++;
209 0 : ftl_band_iter_advance(band, brq->num_blocks);
210 0 : if (ftl_band_filled(band, band->md->iter.offset)) {
211 0 : ftl_band_set_state(band, FTL_BAND_STATE_FULL);
212 0 : band->owner.state_change_fn(band);
213 : }
214 0 : }
215 :
216 : static void
217 0 : read_brq_end(struct spdk_bdev_io *bdev_io, bool success, void *arg)
218 : {
219 0 : struct ftl_basic_rq *brq = arg;
220 0 : struct ftl_band *band = brq->io.band;
221 :
222 0 : ftl_stats_bdev_io_completed(band->dev, FTL_STATS_TYPE_MD_BASE, bdev_io);
223 :
224 0 : brq->success = success;
225 :
226 0 : assert(band->queue_depth > 0);
227 0 : band->queue_depth--;
228 :
229 0 : brq->owner.cb(brq);
230 0 : spdk_bdev_free_io(bdev_io);
231 0 : }
232 :
233 : static void
234 0 : ftl_band_brq_bdev_read(void *_brq)
235 : {
236 0 : struct ftl_basic_rq *brq = _brq;
237 0 : struct spdk_ftl_dev *dev = brq->dev;
238 : int rc;
239 :
240 0 : rc = spdk_bdev_read_blocks(dev->base_bdev_desc, dev->base_ioch,
241 : brq->io_payload, brq->io.addr,
242 : brq->num_blocks, read_brq_end, brq);
243 0 : if (spdk_unlikely(rc)) {
244 0 : if (rc == -ENOMEM) {
245 0 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(dev->base_bdev_desc);
246 0 : brq->io.bdev_io_wait.bdev = bdev;
247 0 : brq->io.bdev_io_wait.cb_fn = ftl_band_brq_bdev_read;
248 0 : brq->io.bdev_io_wait.cb_arg = brq;
249 0 : spdk_bdev_queue_io_wait(bdev, dev->base_ioch, &brq->io.bdev_io_wait);
250 : } else {
251 0 : ftl_abort();
252 : }
253 : }
254 0 : }
255 :
256 : void
257 0 : ftl_band_basic_rq_read(struct ftl_band *band, struct ftl_basic_rq *brq)
258 : {
259 0 : struct spdk_ftl_dev *dev = brq->dev;
260 :
261 0 : brq->io.band = band;
262 :
263 0 : ftl_band_brq_bdev_read(brq);
264 :
265 0 : brq->io.band->queue_depth++;
266 0 : dev->stats.io_activity_total += brq->num_blocks;
267 0 : }
268 :
/*
 * Completion callback for persisting the band's OPEN metadata entry.
 * On success the band transitions to FTL_BAND_STATE_OPEN.
 */
static void
band_open_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* Re-drive the failed metadata persist and wait for the next callback */
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		/* Without retry support a failed metadata persist is fatal */
		ftl_abort();
#endif
	}

	ftl_band_set_state(band, FTL_BAND_STATE_OPEN);
}
285 :
/*
 * Begin opening a band for writes of the given type. The band moves to
 * OPENING immediately; the OPEN state is set asynchronously by band_open_cb()
 * once the band metadata entry has been persisted.
 */
void
ftl_band_open(struct ftl_band *band, enum ftl_band_type type)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_BAND_MD);
	struct ftl_p2l_map *p2l_map = &band->p2l_map;

	ftl_band_set_type(band, type);
	ftl_band_set_state(band, FTL_BAND_STATE_OPENING);

	/* Stage the band metadata in the DMA-able copy with the OPEN state applied */
	memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->band_dma_md->state = FTL_BAND_STATE_OPEN;
	p2l_map->band_dma_md->p2l_map_checksum = 0;

	if (spdk_unlikely(0 != band->p2l_map.num_valid)) {
		/*
		 * This is inconsistent state, a band with valid block,
		 * it could be moved on the free list
		 */
		assert(false && 0 == band->p2l_map.num_valid);
		ftl_abort();
	}

	/* Persist the staged entry; band_open_cb() completes the transition */
	ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
			     band_open_cb, band, &band->md_persist_entry_ctx);
}
313 :
/*
 * Completion callback for persisting the band's CLOSED metadata entry.
 * Copies the persisted P2L map checksum back into the in-memory band
 * metadata and finishes the CLOSED transition.
 */
static void
band_close_cb(int status, void *cb_arg)
{
	struct ftl_band *band = cb_arg;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* Re-drive the failed metadata persist and wait for the next callback */
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		/* Without retry support a failed metadata persist is fatal */
		ftl_abort();
#endif
	}

	band->md->p2l_map_checksum = band->p2l_map.band_dma_md->p2l_map_checksum;
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}
331 :
/*
 * Owner callback for the P2L map write issued by ftl_band_close(). On
 * success, checksums the map, stages the band metadata with the CLOSED
 * state, and persists it (band_close_cb() finishes the transition).
 */
static void
band_map_write_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->io.band;
	struct ftl_p2l_map *p2l_map = &band->p2l_map;
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_BAND_MD);
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
	uint32_t band_map_crc;

	if (spdk_likely(brq->success)) {

		/* CRC over the full tail-metadata area backing the band map */
		band_map_crc = spdk_crc32c_update(p2l_map->band_map,
						  ftl_tail_md_num_blocks(dev) * FTL_BLOCK_SIZE, 0);
		/* Stage band metadata in the DMA-able copy with CLOSED state and new CRC */
		memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
		p2l_map->band_dma_md->state = FTL_BAND_STATE_CLOSED;
		p2l_map->band_dma_md->p2l_map_checksum = band_map_crc;

		ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
				     band_close_cb, band, &band->md_persist_entry_ctx);
	} else {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* Try to retry in case of failure */
		ftl_band_brq_bdev_write(brq);
		band->queue_depth++;
#else
		/* Without retry support a failed P2L map write is fatal */
		ftl_abort();
#endif
	}
}
362 :
363 : void
364 0 : ftl_band_close(struct ftl_band *band)
365 : {
366 0 : struct spdk_ftl_dev *dev = band->dev;
367 0 : void *metadata = band->p2l_map.band_map;
368 0 : uint64_t num_blocks = ftl_tail_md_num_blocks(dev);
369 :
370 : /* Write P2L map first, after completion, set the state to close on nvcache, then internally */
371 0 : band->md->close_seq_id = ftl_get_next_seq_id(dev);
372 0 : ftl_band_set_state(band, FTL_BAND_STATE_CLOSING);
373 0 : ftl_basic_rq_init(dev, &band->metadata_rq, metadata, num_blocks);
374 0 : ftl_basic_rq_set_owner(&band->metadata_rq, band_map_write_cb, band);
375 :
376 0 : ftl_band_basic_rq_write(band, &band->metadata_rq);
377 0 : }
378 :
/*
 * Completion callback for persisting the band's FREE metadata entry.
 * Releases the P2L map and finishes the FREE transition.
 */
static void
band_free_cb(int status, void *ctx)
{
	struct ftl_band *band = (struct ftl_band *)ctx;

	if (spdk_unlikely(status)) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		/* Re-drive the failed metadata persist and wait for the next callback */
		ftl_md_persist_entry_retry(&band->md_persist_entry_ctx);
		return;
#else
		/* Without retry support a failed metadata persist is fatal */
		ftl_abort();
#endif
	}

	ftl_band_release_p2l_map(band);
	FTL_DEBUGLOG(band->dev, "Band is going to free state. Band id: %u\n", band->id);
	ftl_band_set_state(band, FTL_BAND_STATE_FREE);
	/* Releasing the map above must have dropped the last reference */
	assert(0 == band->p2l_map.ref_cnt);
}
398 :
399 : void
400 0 : ftl_band_free(struct ftl_band *band)
401 : {
402 0 : struct spdk_ftl_dev *dev = band->dev;
403 0 : struct ftl_p2l_map *p2l_map = &band->p2l_map;
404 0 : struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_BAND_MD];
405 0 : struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_BAND_MD);
406 :
407 0 : memcpy(p2l_map->band_dma_md, band->md, region->entry_size * FTL_BLOCK_SIZE);
408 0 : p2l_map->band_dma_md->state = FTL_BAND_STATE_FREE;
409 0 : p2l_map->band_dma_md->close_seq_id = 0;
410 0 : p2l_map->band_dma_md->p2l_map_checksum = 0;
411 :
412 0 : ftl_md_persist_entry(md, band->id, p2l_map->band_dma_md, NULL,
413 : band_free_cb, band, &band->md_persist_entry_ctx);
414 :
415 : /* TODO: The whole band erase code should probably be done here instead */
416 0 : }
417 :
/*
 * Owner callback for the P2L map read issued by _read_md(). Retries failed
 * reads; on success, validates the map CRC against the stored checksum and
 * reports the outcome to the band owner's ops callback.
 */
static void
read_md_cb(struct ftl_basic_rq *brq)
{
	struct ftl_band *band = brq->owner.priv;
	struct spdk_ftl_dev *dev = band->dev;
	ftl_band_ops_cb cb;
	uint32_t band_map_crc;
	bool success = true;
	void *priv;

	/* Capture the owner callback/context before they are cleared below */
	cb = band->owner.ops_fn;
	priv = band->owner.priv;

	if (!brq->success) {
		/* Resubmit the same metadata read; owner fields stay intact for the retry */
		ftl_band_basic_rq_read(band, &band->metadata_rq);
		return;
	}

	band_map_crc = spdk_crc32c_update(band->p2l_map.band_map,
					  ftl_tail_md_num_blocks(band->dev) * FTL_BLOCK_SIZE, 0);
	/* Checksum of 0 means no stored CRC to validate against */
	if (band->md->p2l_map_checksum && band->md->p2l_map_checksum != band_map_crc) {
		FTL_ERRLOG(dev, "GC error, inconsistent P2L map CRC\n");
		success = false;

		ftl_stats_crc_error(band->dev, FTL_STATS_TYPE_GC);
	}
	/* Release ownership before invoking the callback */
	band->owner.ops_fn = NULL;
	band->owner.priv = NULL;
	cb(band, priv, success);
}
448 :
449 : static int
450 0 : _read_md(struct ftl_band *band)
451 : {
452 0 : struct spdk_ftl_dev *dev = band->dev;
453 0 : struct ftl_basic_rq *rq = &band->metadata_rq;
454 :
455 0 : if (ftl_band_alloc_p2l_map(band)) {
456 0 : return -ENOMEM;
457 : }
458 :
459 : /* Read P2L map */
460 0 : ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_p2l_map_num_blocks(dev));
461 0 : ftl_basic_rq_set_owner(rq, read_md_cb, band);
462 :
463 0 : rq->io.band = band;
464 0 : rq->io.addr = ftl_band_p2l_map_addr(band);
465 :
466 0 : ftl_band_basic_rq_read(band, &band->metadata_rq);
467 :
468 0 : return 0;
469 : }
470 :
/*
 * Thread-message wrapper around _read_md(). If the P2L map allocation
 * fails, reschedule this function on the same thread to retry later.
 */
static void
read_md(void *band)
{
	if (spdk_unlikely(_read_md(band) != 0)) {
		spdk_thread_send_msg(spdk_get_thread(), read_md, band);
	}
}
481 :
482 : static void
483 0 : read_tail_md_cb(struct ftl_basic_rq *brq)
484 : {
485 0 : struct ftl_band *band = brq->owner.priv;
486 0 : enum ftl_md_status status = FTL_MD_IO_FAILURE;
487 : ftl_band_md_cb cb;
488 : void *priv;
489 :
490 0 : if (spdk_unlikely(!brq->success)) {
491 : /* Retries the read in case of error */
492 0 : ftl_band_basic_rq_read(band, &band->metadata_rq);
493 0 : return;
494 : }
495 :
496 0 : cb = band->owner.md_fn;
497 0 : band->owner.md_fn = NULL;
498 :
499 0 : priv = band->owner.priv;
500 0 : band->owner.priv = NULL;
501 :
502 0 : status = FTL_MD_SUCCESS;
503 :
504 0 : cb(band, priv, status);
505 : }
506 :
507 : void
508 0 : ftl_band_read_tail_brq_md(struct ftl_band *band, ftl_band_md_cb cb, void *cntx)
509 : {
510 0 : struct spdk_ftl_dev *dev = band->dev;
511 0 : struct ftl_basic_rq *rq = &band->metadata_rq;
512 :
513 0 : ftl_basic_rq_init(dev, rq, band->p2l_map.band_map, ftl_tail_md_num_blocks(dev));
514 0 : ftl_basic_rq_set_owner(rq, read_tail_md_cb, band);
515 :
516 0 : assert(!band->owner.md_fn);
517 0 : assert(!band->owner.priv);
518 0 : band->owner.md_fn = cb;
519 0 : band->owner.priv = cntx;
520 :
521 0 : rq->io.band = band;
522 0 : rq->io.addr = band->tail_md_addr;
523 :
524 0 : ftl_band_basic_rq_read(band, &band->metadata_rq);
525 0 : }
526 :
527 : void
528 0 : ftl_band_get_next_gc(struct spdk_ftl_dev *dev, ftl_band_ops_cb cb, void *cntx)
529 : {
530 0 : struct ftl_band *band = ftl_band_search_next_to_reloc(dev);
531 :
532 : /* if disk is very small, GC start very early that no band is ready for it */
533 0 : if (spdk_unlikely(!band)) {
534 0 : cb(NULL, cntx, false);
535 0 : return;
536 : }
537 :
538 : /* Only one owner is allowed */
539 0 : assert(!band->queue_depth);
540 0 : assert(!band->owner.ops_fn);
541 0 : assert(!band->owner.priv);
542 0 : band->owner.ops_fn = cb;
543 0 : band->owner.priv = cntx;
544 :
545 0 : read_md(band);
546 : }
|