/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2017, IBM Corporation. All rights reserved.
 * Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
 */

/*
 * NVMe over PCIe transport
 */

#include "spdk/stdinc.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/string.h"
#include "nvme_internal.h"
#include "nvme_pcie_internal.h"

struct nvme_pcie_enum_ctx {
	struct spdk_nvme_probe_ctx *probe_ctx;
	struct spdk_pci_addr pci_addr;
	bool has_pci_addr;
};

static uint16_t g_signal_lock;
static bool g_sigset = false;
static spdk_nvme_pcie_hotplug_filter_cb g_hotplug_filter_cb;

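/*
 * SIGBUS handler installed via spdk_pci_register_error_handler(). When a
 * controller is surprise hot-removed, an in-flight MMIO access to its BAR
 * faults with SIGBUS. To keep the process alive, remap the register window
 * to anonymous memory filled with 0xFF so that subsequent register reads
 * return all 1s, the same value a dead PCI device would return.
 * g_signal_lock serializes handlers racing on multiple threads.
 */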
static void
nvme_sigbus_fault_sighandler(const void *failure_addr, void *ctx)
{
	void *map_address;
	uint16_t flag = 0;

	if (!__atomic_compare_exchange_n(&g_signal_lock, &flag, 1, false, __ATOMIC_ACQUIRE,
					 __ATOMIC_RELAXED)) {
		SPDK_DEBUGLOG(nvme, "request g_signal_lock failed\n");
		return;
	}

	if (g_thread_mmio_ctrlr == NULL) {
		/* The fault was not raised by one of our MMIO accesses; release
		 * the lock so later faults can still be handled.
		 */
		__atomic_store_n(&g_signal_lock, 0, __ATOMIC_RELEASE);
		return;
	}

	if (!g_thread_mmio_ctrlr->is_remapped) {
		map_address = mmap((void *)g_thread_mmio_ctrlr->regs, g_thread_mmio_ctrlr->regs_size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
		if (map_address == MAP_FAILED) {
			SPDK_ERRLOG("mmap failed\n");
			__atomic_store_n(&g_signal_lock, 0, __ATOMIC_RELEASE);
			return;
		}
		memset(map_address, 0xFF, sizeof(struct spdk_nvme_registers));
		g_thread_mmio_ctrlr->regs = (volatile struct spdk_nvme_registers *)map_address;
		g_thread_mmio_ctrlr->is_remapped = true;
	}
	__atomic_store_n(&g_signal_lock, 0, __ATOMIC_RELEASE);
}

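/*
 * Process a single PCI uevent. An ADD event makes the new device address
 * eligible for attachment (subject to the optional hotplug filter); the
 * actual probe happens later via enumeration. A REMOVE event looks up the
 * attached controller by its address, fails it, and notifies the
 * application through remove_cb so it can stop I/O and detach.
 */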
static void
_nvme_pcie_event_process(struct spdk_pci_event *event, void *cb_ctx)
{
	struct spdk_nvme_transport_id trid;
	struct spdk_nvme_ctrlr *ctrlr;

	if (event->action == SPDK_UEVENT_ADD) {
		if (spdk_process_is_primary()) {
			if (g_hotplug_filter_cb == NULL || g_hotplug_filter_cb(&event->traddr)) {
				/* The enumerate interface implements the add operation */
				spdk_pci_device_allow(&event->traddr);
			}
		}
	} else if (event->action == SPDK_UEVENT_REMOVE) {
		memset(&trid, 0, sizeof(trid));
		spdk_nvme_trid_populate_transport(&trid, SPDK_NVME_TRANSPORT_PCIE);

		if (spdk_pci_addr_fmt(trid.traddr, sizeof(trid.traddr), &event->traddr) < 0) {
			SPDK_ERRLOG("Failed to format pci address\n");
			return;
		}

		ctrlr = nvme_get_ctrlr_by_trid_unsafe(&trid, NULL);
		if (ctrlr == NULL) {
			return;
		}
		SPDK_DEBUGLOG(nvme, "remove nvme address: %s\n", trid.traddr);

		nvme_ctrlr_lock(ctrlr);
		nvme_ctrlr_fail(ctrlr, true);
		nvme_ctrlr_unlock(ctrlr);

		/* Get the user app to clean up and stop I/O */
		if (ctrlr->remove_cb) {
			nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
			ctrlr->remove_cb(ctrlr->cb_ctx, ctrlr);
			nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
		}
	}
}

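/*
 * Drain pending PCI hotplug events, then scan the attached PCIe controllers
 * for devices that were physically removed without a corresponding uevent.
 * Returns nonzero if at least one removal was initiated, so the caller can
 * skip re-enumeration on this iteration.
 */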
static int
_nvme_pcie_hotplug_monitor(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;
	struct spdk_pci_event event;
	int rc = 0;

	if (g_spdk_nvme_driver->hotplug_fd >= 0) {
		while (spdk_pci_get_event(g_spdk_nvme_driver->hotplug_fd, &event) > 0) {
			_nvme_pcie_event_process(&event, probe_ctx->cb_ctx);
		}
	}

	/* Initiate removal of physically hot-removed PCI controllers. Even after
	 * they're hot-removed from the system, SPDK might still report them via RPC.
	 */
	TAILQ_FOREACH_SAFE(ctrlr, &g_spdk_nvme_driver->shared_attached_ctrlrs, tailq, tmp) {
		bool do_remove = false;
		struct nvme_pcie_ctrlr *pctrlr;

		if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
			continue;
		}

		pctrlr = nvme_pcie_ctrlr(ctrlr);
		if (spdk_pci_device_is_removed(pctrlr->devhandle)) {
			do_remove = true;
			rc = 1;
		}

		if (do_remove) {
			nvme_ctrlr_lock(ctrlr);
			nvme_ctrlr_fail(ctrlr, true);
			nvme_ctrlr_unlock(ctrlr);
			if (ctrlr->remove_cb) {
				nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
				ctrlr->remove_cb(ctrlr->cb_ctx, ctrlr);
				nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
			}
		}
	}
	return rc;
}

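/*
 * MMIO register accessors. Each access is bracketed by setting
 * g_thread_mmio_ctrlr so that, if the access faults because the device was
 * hot-removed, the SIGBUS handler above knows which controller's BAR to
 * remap.
 */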
static volatile void *
nvme_pcie_reg_addr(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	return (volatile void *)((uintptr_t)pctrlr->regs + offset);
}

static volatile struct spdk_nvme_registers *
nvme_pcie_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	return pctrlr->regs;
}

static int
nvme_pcie_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	assert(offset <= sizeof(struct spdk_nvme_registers) - 4);
	g_thread_mmio_ctrlr = pctrlr;
	spdk_mmio_write_4(nvme_pcie_reg_addr(ctrlr, offset), value);
	g_thread_mmio_ctrlr = NULL;
	return 0;
}

static int
nvme_pcie_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	assert(offset <= sizeof(struct spdk_nvme_registers) - 8);
	g_thread_mmio_ctrlr = pctrlr;
	spdk_mmio_write_8(nvme_pcie_reg_addr(ctrlr, offset), value);
	g_thread_mmio_ctrlr = NULL;
	return 0;
}

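/*
 * A register read of all 1s means the device is no longer there (either the
 * PCI read returned the all-ones error value, or the BAR was remapped by the
 * SIGBUS handler), so treat it as a failure.
 */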
static int
nvme_pcie_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	assert(offset <= sizeof(struct spdk_nvme_registers) - 4);
	assert(value != NULL);
	g_thread_mmio_ctrlr = pctrlr;
	*value = spdk_mmio_read_4(nvme_pcie_reg_addr(ctrlr, offset));
	g_thread_mmio_ctrlr = NULL;
	if (~(*value) == 0) {
		return -1;
	}

	return 0;
}

static int
nvme_pcie_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	assert(offset <= sizeof(struct spdk_nvme_registers) - 8);
	assert(value != NULL);
	g_thread_mmio_ctrlr = pctrlr;
	*value = spdk_mmio_read_8(nvme_pcie_reg_addr(ctrlr, offset));
	g_thread_mmio_ctrlr = NULL;
	if (~(*value) == 0) {
		return -1;
	}

	return 0;
}

static int
nvme_pcie_ctrlr_set_asq(struct nvme_pcie_ctrlr *pctrlr, uint64_t value)
{
	return nvme_pcie_ctrlr_set_reg_8(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, asq),
					 value);
}

static int
nvme_pcie_ctrlr_set_acq(struct nvme_pcie_ctrlr *pctrlr, uint64_t value)
{
	return nvme_pcie_ctrlr_set_reg_8(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, acq),
					 value);
}

static int
nvme_pcie_ctrlr_set_aqa(struct nvme_pcie_ctrlr *pctrlr, const union spdk_nvme_aqa_register *aqa)
{
	return nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, aqa.raw),
					 aqa->raw);
}

static int
nvme_pcie_ctrlr_get_cmbloc(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_cmbloc_register *cmbloc)
{
	return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
					 &cmbloc->raw);
}

static int
nvme_pcie_ctrlr_get_cmbsz(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_cmbsz_register *cmbsz)
{
	return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
					 &cmbsz->raw);
}

static int
nvme_pcie_ctrlr_get_pmrcap(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_pmrcap_register *pmrcap)
{
	return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
					 &pmrcap->raw);
}

static int
nvme_pcie_ctrlr_set_pmrctl(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_pmrctl_register *pmrctl)
{
	return nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrctl.raw),
					 pmrctl->raw);
}

static int
nvme_pcie_ctrlr_get_pmrctl(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_pmrctl_register *pmrctl)
{
	return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrctl.raw),
					 &pmrctl->raw);
}

static int
nvme_pcie_ctrlr_get_pmrsts(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_pmrsts_register *pmrsts)
{
	return nvme_pcie_ctrlr_get_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
					 &pmrsts->raw);
}

static int
nvme_pcie_ctrlr_set_pmrmscl(struct nvme_pcie_ctrlr *pctrlr, uint32_t value)
{
	return nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrmscl.raw),
					 value);
}

static int
nvme_pcie_ctrlr_set_pmrmscu(struct nvme_pcie_ctrlr *pctrlr, uint32_t value)
{
	return nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrmscu),
					 value);
}

static uint32_t
nvme_pcie_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	/*
	 * For commands requiring more than 2 PRP entries, one PRP will be
	 * embedded in the command (prp1), and the rest of the PRP entries
	 * will be in a list pointed to by the command (prp2). The number
	 * of PRP entries in the list is defined by
	 * NVME_MAX_PRP_LIST_ENTRIES.
	 *
	 * Note that the max xfer size is not (MAX_ENTRIES + 1) * page_size
	 * because the first PRP entry may not be aligned on a 4KiB
	 * boundary.
	 */
	return NVME_MAX_PRP_LIST_ENTRIES * ctrlr->page_size;
}

static uint16_t
nvme_pcie_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	return NVME_MAX_SGL_DESCRIPTORS;
}

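/*
 * Map the Controller Memory Buffer (CMB), if present. The CMB lives at an
 * offset within one of the controller's BARs; its location (CMBLOC) and
 * size (CMBSZ, a count of units that scale by powers of 16 starting at
 * 4KiB) are decoded from the registers before mapping the BAR.
 */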
static void
nvme_pcie_ctrlr_map_cmb(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc;
	void *addr = NULL;
	uint32_t bir;
	union spdk_nvme_cmbsz_register cmbsz;
	union spdk_nvme_cmbloc_register cmbloc;
	uint64_t size, unit_size, offset, bar_size = 0, bar_phys_addr = 0;

	if (nvme_pcie_ctrlr_get_cmbsz(pctrlr, &cmbsz) ||
	    nvme_pcie_ctrlr_get_cmbloc(pctrlr, &cmbloc)) {
		SPDK_ERRLOG("get registers failed\n");
		goto exit;
	}

	if (!cmbsz.bits.sz) {
		goto exit;
	}

	bir = cmbloc.bits.bir;
	/* Values 0, 2, 3, 4 and 5 are valid for the BAR indicator; 1 is reserved */
	if (bir > 5 || bir == 1) {
		goto exit;
	}

	/* unit size for 4KB/64KB/1MB/16MB/256MB/4GB/64GB */
	unit_size = (uint64_t)1 << (12 + 4 * cmbsz.bits.szu);
	/* controller memory buffer size in bytes */
	size = unit_size * cmbsz.bits.sz;
	/* controller memory buffer offset from BAR in bytes */
	offset = unit_size * cmbloc.bits.ofst;

	rc = spdk_pci_device_map_bar(pctrlr->devhandle, bir, &addr,
				     &bar_phys_addr, &bar_size);
	if ((rc != 0) || addr == NULL) {
		goto exit;
	}

	if (offset > bar_size) {
		goto exit;
	}

	if (size > bar_size - offset) {
		goto exit;
	}

	pctrlr->cmb.bar_va = addr;
	pctrlr->cmb.bar_pa = bar_phys_addr;
	pctrlr->cmb.size = size;
	pctrlr->cmb.current_offset = offset;

	if (!cmbsz.bits.sqs) {
		pctrlr->ctrlr.opts.use_cmb_sqs = false;
	}

	return;
exit:
	pctrlr->ctrlr.opts.use_cmb_sqs = false;
	return;
}

static int
nvme_pcie_ctrlr_unmap_cmb(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc = 0;
	union spdk_nvme_cmbloc_register cmbloc;
	void *addr = pctrlr->cmb.bar_va;

	if (addr) {
		if (pctrlr->cmb.mem_register_addr) {
			spdk_mem_unregister(pctrlr->cmb.mem_register_addr, pctrlr->cmb.mem_register_size);
		}

		if (nvme_pcie_ctrlr_get_cmbloc(pctrlr, &cmbloc)) {
			SPDK_ERRLOG("get_cmbloc() failed\n");
			return -EIO;
		}
		rc = spdk_pci_device_unmap_bar(pctrlr->devhandle, cmbloc.bits.bir, addr);
	}
	return rc;
}

static int
nvme_pcie_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);

	if (pctrlr->cmb.bar_va == NULL) {
		SPDK_DEBUGLOG(nvme, "CMB not available\n");
		return -ENOTSUP;
	}

	if (ctrlr->opts.use_cmb_sqs) {
		SPDK_ERRLOG("CMB is already in use for submission queues.\n");
		return -ENOTSUP;
	}

	return 0;
}

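/*
 * Expose the CMB as host-addressable I/O buffer space. spdk_mem_register()
 * operates on 2MiB-aligned ranges, so the usable window is trimmed inward
 * to 2MiB boundaries; CMBs smaller than 4MiB are rejected because trimming
 * could leave nothing to register.
 */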
static void *
nvme_pcie_ctrlr_map_io_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	union spdk_nvme_cmbsz_register cmbsz;
	union spdk_nvme_cmbloc_register cmbloc;
	uint64_t mem_register_start, mem_register_end;
	int rc;

	if (pctrlr->cmb.mem_register_addr != NULL) {
		*size = pctrlr->cmb.mem_register_size;
		return pctrlr->cmb.mem_register_addr;
	}

	*size = 0;

	if (pctrlr->cmb.bar_va == NULL) {
		SPDK_DEBUGLOG(nvme, "CMB not available\n");
		return NULL;
	}

	if (ctrlr->opts.use_cmb_sqs) {
		SPDK_ERRLOG("CMB is already in use for submission queues.\n");
		return NULL;
	}

	if (nvme_pcie_ctrlr_get_cmbsz(pctrlr, &cmbsz) ||
	    nvme_pcie_ctrlr_get_cmbloc(pctrlr, &cmbloc)) {
		SPDK_ERRLOG("get registers failed\n");
		return NULL;
	}

	/* If only SQS is supported */
	if (!(cmbsz.bits.wds || cmbsz.bits.rds)) {
		return NULL;
	}

	/* If CMB is less than 4MiB in size then abort CMB mapping */
	if (pctrlr->cmb.size < (1ULL << 22)) {
		return NULL;
	}

	mem_register_start = _2MB_PAGE((uintptr_t)pctrlr->cmb.bar_va + pctrlr->cmb.current_offset +
				       VALUE_2MB - 1);
	mem_register_end = _2MB_PAGE((uintptr_t)pctrlr->cmb.bar_va + pctrlr->cmb.current_offset +
				     pctrlr->cmb.size);

	rc = spdk_mem_register((void *)mem_register_start, mem_register_end - mem_register_start);
	if (rc) {
		SPDK_ERRLOG("spdk_mem_register() failed\n");
		return NULL;
	}

	pctrlr->cmb.mem_register_addr = (void *)mem_register_start;
	pctrlr->cmb.mem_register_size = mem_register_end - mem_register_start;

	*size = pctrlr->cmb.mem_register_size;
	return pctrlr->cmb.mem_register_addr;
}

static int
nvme_pcie_ctrlr_unmap_io_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	int rc;

	if (pctrlr->cmb.mem_register_addr == NULL) {
		return 0;
	}

	rc = spdk_mem_unregister(pctrlr->cmb.mem_register_addr, pctrlr->cmb.mem_register_size);

	if (rc == 0) {
		pctrlr->cmb.mem_register_addr = NULL;
		pctrlr->cmb.mem_register_size = 0;
	}

	return rc;
}

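/*
 * Map the Persistent Memory Region (PMR), if the controller supports it.
 * When the controller also supports Controller Memory Space (PMRCAP.CMSS),
 * program the memory space control registers (PMRMSCL/PMRMSCU) with the
 * BAR's physical address so host-visible addresses may reference the PMR
 * directly.
 */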
static void
nvme_pcie_ctrlr_map_pmr(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc;
	void *addr = NULL;
	uint32_t bir;
	union spdk_nvme_pmrcap_register pmrcap;
	uint64_t bar_size = 0, bar_phys_addr = 0;

	if (!pctrlr->regs->cap.bits.pmrs) {
		return;
	}

	if (nvme_pcie_ctrlr_get_pmrcap(pctrlr, &pmrcap)) {
		SPDK_ERRLOG("get registers failed\n");
		return;
	}

	bir = pmrcap.bits.bir;
	/* Values 2, 3, 4 and 5 are valid for the BAR indicator */
	if (bir > 5 || bir < 2) {
		SPDK_ERRLOG("invalid base indicator register value\n");
		return;
	}

	rc = spdk_pci_device_map_bar(pctrlr->devhandle, bir, &addr, &bar_phys_addr, &bar_size);
	if ((rc != 0) || addr == NULL) {
		SPDK_ERRLOG("could not map the bar %d\n", bir);
		return;
	}

	if (pmrcap.bits.cmss) {
		uint32_t pmrmscl, pmrmscu, cmse = 1;
		union spdk_nvme_pmrsts_register pmrsts;

		/* Enable Controller Memory Space */
		pmrmscl = (uint32_t)((bar_phys_addr & 0xFFFFF000ULL) | (cmse << 1));
		pmrmscu = (uint32_t)((bar_phys_addr >> 32ULL) & 0xFFFFFFFFULL);

		if (nvme_pcie_ctrlr_set_pmrmscu(pctrlr, pmrmscu)) {
			SPDK_ERRLOG("set_pmrmscu() failed\n");
			spdk_pci_device_unmap_bar(pctrlr->devhandle, bir, addr);
			return;
		}

		if (nvme_pcie_ctrlr_set_pmrmscl(pctrlr, pmrmscl)) {
			SPDK_ERRLOG("set_pmrmscl() failed\n");
			spdk_pci_device_unmap_bar(pctrlr->devhandle, bir, addr);
			return;
		}

		if (nvme_pcie_ctrlr_get_pmrsts(pctrlr, &pmrsts)) {
			SPDK_ERRLOG("get pmrsts failed\n");
			spdk_pci_device_unmap_bar(pctrlr->devhandle, bir, addr);
			return;
		}

		if (pmrsts.bits.cbai) {
			SPDK_ERRLOG("Controller Memory Space Enable Failure\n");
			SPDK_ERRLOG("CBA Invalid - Host Addresses cannot reference PMR\n");
		} else {
			SPDK_DEBUGLOG(nvme, "Controller Memory Space Enable Success\n");
			SPDK_DEBUGLOG(nvme, "Host Addresses can reference PMR\n");
		}
	}

	pctrlr->pmr.bar_va = addr;
	pctrlr->pmr.bar_pa = bar_phys_addr;
	pctrlr->pmr.size = pctrlr->ctrlr.pmr_size = bar_size;
}

static int
nvme_pcie_ctrlr_unmap_pmr(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc = 0;
	union spdk_nvme_pmrcap_register pmrcap;
	void *addr = pctrlr->pmr.bar_va;

	if (addr == NULL) {
		return rc;
	}

	if (pctrlr->pmr.mem_register_addr) {
		spdk_mem_unregister(pctrlr->pmr.mem_register_addr, pctrlr->pmr.mem_register_size);
	}

	if (nvme_pcie_ctrlr_get_pmrcap(pctrlr, &pmrcap)) {
		SPDK_ERRLOG("get_pmrcap() failed\n");
		return -EIO;
	}

	if (pmrcap.bits.cmss) {
		if (nvme_pcie_ctrlr_set_pmrmscu(pctrlr, 0)) {
			SPDK_ERRLOG("set_pmrmscu() failed\n");
		}

		if (nvme_pcie_ctrlr_set_pmrmscl(pctrlr, 0)) {
			SPDK_ERRLOG("set_pmrmscl() failed\n");
		}
	}

	rc = spdk_pci_device_unmap_bar(pctrlr->devhandle, pmrcap.bits.bir, addr);

	return rc;
}

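/*
 * Enable or disable the PMR by toggling PMRCTL.EN, then poll PMRSTS.NRDY
 * until it reflects the new state. The poll is bounded by the PMRTO timeout
 * advertised in PMRCAP, in units of 500 milliseconds or minutes depending
 * on PMRTU.
 */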
static int
nvme_pcie_ctrlr_config_pmr(struct spdk_nvme_ctrlr *ctrlr, bool enable)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	union spdk_nvme_pmrcap_register pmrcap;
	union spdk_nvme_pmrctl_register pmrctl;
	union spdk_nvme_pmrsts_register pmrsts;
	uint8_t pmrto, pmrtu;
	uint64_t timeout_in_ms, ticks_per_ms, timeout_in_ticks, now_ticks;

	if (!pctrlr->regs->cap.bits.pmrs) {
		SPDK_ERRLOG("PMR is not supported by the controller\n");
		return -ENOTSUP;
	}

	if (nvme_pcie_ctrlr_get_pmrcap(pctrlr, &pmrcap)) {
		SPDK_ERRLOG("get registers failed\n");
		return -EIO;
	}

	pmrto = pmrcap.bits.pmrto;
	pmrtu = pmrcap.bits.pmrtu;

	if (pmrtu > 1) {
		SPDK_ERRLOG("PMR Time Units Invalid\n");
		return -EINVAL;
	}

	ticks_per_ms = spdk_get_ticks_hz() / 1000;
	timeout_in_ms = pmrto * (pmrtu ? (60 * 1000) : 500);
	timeout_in_ticks = timeout_in_ms * ticks_per_ms;

	if (nvme_pcie_ctrlr_get_pmrctl(pctrlr, &pmrctl)) {
		SPDK_ERRLOG("get pmrctl failed\n");
		return -EIO;
	}

	if (enable && pmrctl.bits.en != 0) {
		SPDK_ERRLOG("PMR is already enabled\n");
		return -EINVAL;
	} else if (!enable && pmrctl.bits.en != 1) {
		SPDK_ERRLOG("PMR is already disabled\n");
		return -EINVAL;
	}

	pmrctl.bits.en = enable;

	if (nvme_pcie_ctrlr_set_pmrctl(pctrlr, &pmrctl)) {
		SPDK_ERRLOG("set pmrctl failed\n");
		return -EIO;
	}

	now_ticks = spdk_get_ticks();

	do {
		if (nvme_pcie_ctrlr_get_pmrsts(pctrlr, &pmrsts)) {
			SPDK_ERRLOG("get pmrsts failed\n");
			return -EIO;
		}

		if (pmrsts.bits.nrdy == enable &&
		    spdk_get_ticks() > now_ticks + timeout_in_ticks) {
			SPDK_ERRLOG("PMR Enable - Timed Out\n");
			return -ETIMEDOUT;
		}
	} while (pmrsts.bits.nrdy == enable);

	SPDK_DEBUGLOG(nvme, "PMR %s\n", enable ? "Enabled" : "Disabled");

	return 0;
}

static int
nvme_pcie_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return nvme_pcie_ctrlr_config_pmr(ctrlr, true);
}

static int
nvme_pcie_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	return nvme_pcie_ctrlr_config_pmr(ctrlr, false);
}

static void *
nvme_pcie_ctrlr_map_io_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	union spdk_nvme_pmrcap_register pmrcap;
	uint64_t mem_register_start, mem_register_end;
	int rc;

	if (!pctrlr->regs->cap.bits.pmrs) {
		SPDK_ERRLOG("PMR is not supported by the controller\n");
		return NULL;
	}

	if (pctrlr->pmr.mem_register_addr != NULL) {
		*size = pctrlr->pmr.mem_register_size;
		return pctrlr->pmr.mem_register_addr;
	}

	*size = 0;

	if (pctrlr->pmr.bar_va == NULL) {
		SPDK_DEBUGLOG(nvme, "PMR not available\n");
		return NULL;
	}

	if (nvme_pcie_ctrlr_get_pmrcap(pctrlr, &pmrcap)) {
		SPDK_ERRLOG("get registers failed\n");
		return NULL;
	}

	/* Check if WDS / RDS is supported */
	if (!(pmrcap.bits.wds || pmrcap.bits.rds)) {
		return NULL;
	}

	/* If PMR is less than 4MiB in size then abort PMR mapping */
	if (pctrlr->pmr.size < (1ULL << 22)) {
		return NULL;
	}

	mem_register_start = _2MB_PAGE((uintptr_t)pctrlr->pmr.bar_va + VALUE_2MB - 1);
	mem_register_end = _2MB_PAGE((uintptr_t)pctrlr->pmr.bar_va + pctrlr->pmr.size);

	rc = spdk_mem_register((void *)mem_register_start, mem_register_end - mem_register_start);
	if (rc) {
		SPDK_ERRLOG("spdk_mem_register() failed\n");
		return NULL;
	}

	pctrlr->pmr.mem_register_addr = (void *)mem_register_start;
	pctrlr->pmr.mem_register_size = mem_register_end - mem_register_start;

	*size = pctrlr->pmr.mem_register_size;
	return pctrlr->pmr.mem_register_addr;
}

static int
nvme_pcie_ctrlr_unmap_io_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	int rc;

	if (pctrlr->pmr.mem_register_addr == NULL) {
		return -ENXIO;
	}

	rc = spdk_mem_unregister(pctrlr->pmr.mem_register_addr, pctrlr->pmr.mem_register_size);

	if (rc == 0) {
		pctrlr->pmr.mem_register_addr = NULL;
		pctrlr->pmr.mem_register_size = 0;
	}

	return rc;
}

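/*
 * Map BAR0, which contains the NVMe register set and the doorbell area,
 * then opportunistically map the CMB and PMR. CMB/PMR mapping failures are
 * not fatal; the controller simply runs without those features.
 */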
static int
nvme_pcie_ctrlr_allocate_bars(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc;
	void *addr = NULL;
	uint64_t phys_addr = 0, size = 0;

	rc = spdk_pci_device_map_bar(pctrlr->devhandle, 0, &addr,
				     &phys_addr, &size);

	if ((addr == NULL) || (rc != 0)) {
		SPDK_ERRLOG("nvme_pcicfg_map_bar failed with rc %d or bar %p\n",
			    rc, addr);
		return -1;
	}

	pctrlr->regs = (volatile struct spdk_nvme_registers *)addr;
	pctrlr->regs_size = size;
	pctrlr->doorbell_base = (volatile uint32_t *)&pctrlr->regs->doorbell[0].sq_tdbl;
	nvme_pcie_ctrlr_map_cmb(pctrlr);
	nvme_pcie_ctrlr_map_pmr(pctrlr);

	return 0;
}

static int
nvme_pcie_ctrlr_free_bars(struct nvme_pcie_ctrlr *pctrlr)
{
	int rc = 0;
	void *addr = (void *)pctrlr->regs;

	if (pctrlr->ctrlr.is_removed) {
		return rc;
	}

	rc = nvme_pcie_ctrlr_unmap_pmr(pctrlr);
	if (rc != 0) {
		SPDK_ERRLOG("nvme_ctrlr_unmap_pmr failed with error code %d\n", rc);
		return -1;
	}

	rc = nvme_pcie_ctrlr_unmap_cmb(pctrlr);
	if (rc != 0) {
		SPDK_ERRLOG("nvme_ctrlr_unmap_cmb failed with error code %d\n", rc);
		return -1;
	}

	if (addr && spdk_process_is_primary()) {
		/* NOTE: addr may have been remapped here. We're relying on DPDK to call
		 * munmap internally.
		 */
		rc = spdk_pci_device_unmap_bar(pctrlr->devhandle, 0, addr);
	}
	return rc;
}

/* This function must only be called while holding g_spdk_nvme_driver->lock */
static int
pcie_nvme_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct spdk_nvme_transport_id trid = {};
	struct nvme_pcie_enum_ctx *enum_ctx = ctx;
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_pci_addr pci_addr;

	pci_addr = spdk_pci_device_get_addr(pci_dev);

	spdk_nvme_trid_populate_transport(&trid, SPDK_NVME_TRANSPORT_PCIE);
	spdk_pci_addr_fmt(trid.traddr, sizeof(trid.traddr), &pci_addr);

	ctrlr = nvme_get_ctrlr_by_trid_unsafe(&trid, NULL);
	if (!spdk_process_is_primary()) {
		if (!ctrlr) {
			SPDK_ERRLOG("Controller must be constructed in the primary process first.\n");
			return -1;
		}

		if (ctrlr->opts.enable_interrupts) {
			SPDK_ERRLOG("Secondary processes are not supported in interrupt mode.\n");
			return -1;
		}

		return nvme_ctrlr_add_process(ctrlr, pci_dev);
	}

	/* Check whether the user passed a specific pci_addr */
	if (enum_ctx->has_pci_addr &&
	    (spdk_pci_addr_compare(&pci_addr, &enum_ctx->pci_addr) != 0)) {
		return 1;
	}

	return nvme_ctrlr_probe(&trid, enum_ctx->probe_ctx, pci_dev);
}

static int
nvme_pci_ctrlr_scan_attached(struct spdk_nvme_probe_ctx *probe_ctx)
{
	/* Only the primary process can monitor hotplug. */
	if (spdk_process_is_primary()) {
		return _nvme_pcie_hotplug_monitor(probe_ctx);
	}
	return 0;
}

static int
nvme_pcie_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
		     bool direct_connect)
{
	struct nvme_pcie_enum_ctx enum_ctx = {};

	enum_ctx.probe_ctx = probe_ctx;

	if (strlen(probe_ctx->trid.traddr) != 0) {
		if (spdk_pci_addr_parse(&enum_ctx.pci_addr, probe_ctx->trid.traddr)) {
			return -1;
		}
		enum_ctx.has_pci_addr = true;
	}

	/* Only the primary process can monitor hotplug. */
	if (nvme_pci_ctrlr_scan_attached(probe_ctx) > 0) {
		/* Some removal events were received. Return immediately, avoiding
		 * an spdk_pci_enumerate() which could trigger issue #3205. */
		return 0;
	}

	if (enum_ctx.has_pci_addr == false) {
		return spdk_pci_enumerate(spdk_pci_nvme_get_driver(),
					  pcie_nvme_enum_cb, &enum_ctx);
	} else {
		return spdk_pci_device_attach(spdk_pci_nvme_get_driver(),
					      pcie_nvme_enum_cb, &enum_ctx, &enum_ctx.pci_addr);
	}
}

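/*
 * Construct a PCIe controller: claim the device to guarantee exclusive
 * ownership, allocate the controller structure from shared memory (so
 * secondary processes can attach), map the BARs, enable bus mastering,
 * and set up the admin queue pair.
 */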
static struct spdk_nvme_ctrlr *
nvme_pcie_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			  const struct spdk_nvme_ctrlr_opts *opts,
			  void *devhandle)
{
	struct spdk_pci_device *pci_dev = devhandle;
	struct nvme_pcie_ctrlr *pctrlr;
	union spdk_nvme_cap_register cap;
	uint16_t cmd_reg;
	int rc;
	struct spdk_pci_id pci_id;

	rc = spdk_pci_device_claim(pci_dev);
	if (rc < 0) {
		SPDK_ERRLOG("could not claim device %s (%s)\n",
			    trid->traddr, spdk_strerror(-rc));
		return NULL;
	}

	pctrlr = spdk_zmalloc(sizeof(struct nvme_pcie_ctrlr), 64, NULL,
			      SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
	if (pctrlr == NULL) {
		spdk_pci_device_unclaim(pci_dev);
		SPDK_ERRLOG("could not allocate ctrlr\n");
		return NULL;
	}

	pctrlr->is_remapped = false;
	pctrlr->ctrlr.is_removed = false;
	pctrlr->devhandle = devhandle;
	pctrlr->ctrlr.opts = *opts;
	pctrlr->ctrlr.trid = *trid;
	pctrlr->ctrlr.opts.admin_queue_size = spdk_max(pctrlr->ctrlr.opts.admin_queue_size,
					      NVME_PCIE_MIN_ADMIN_QUEUE_SIZE);
	pci_id = spdk_pci_device_get_id(pci_dev);
	pctrlr->ctrlr.quirks = nvme_get_quirks(&pci_id);
	if (pci_dev->numa_id != SPDK_ENV_NUMA_ID_ANY) {
		pctrlr->ctrlr.numa.id_valid = 1;
		pctrlr->ctrlr.numa.id = pci_dev->numa_id;
	}

	rc = nvme_ctrlr_construct(&pctrlr->ctrlr);
	if (rc != 0) {
		spdk_pci_device_unclaim(pci_dev);
		spdk_free(pctrlr);
		return NULL;
	}

	rc = nvme_pcie_ctrlr_allocate_bars(pctrlr);
	if (rc != 0) {
		spdk_pci_device_unclaim(pci_dev);
		spdk_free(pctrlr);
		return NULL;
	}

	/* Enable PCI bus mastering (bit 2) and disable INTx legacy interrupts
	 * (bit 10) in the PCI command register (config space offset 4).
	 */
	spdk_pci_device_cfg_read16(pci_dev, &cmd_reg, 4);
	cmd_reg |= 0x404;
	spdk_pci_device_cfg_write16(pci_dev, cmd_reg, 4);

	if (nvme_ctrlr_get_cap(&pctrlr->ctrlr, &cap)) {
		SPDK_ERRLOG("get_cap() failed\n");
		spdk_pci_device_unclaim(pci_dev);
		spdk_free(pctrlr);
		return NULL;
	}

	/* The doorbell stride is 2 ^ (2 + DSTRD) bytes. doorbell_stride_u32 is
	 * in units of 4-byte dwords, so drop the + 2.
	 */
	pctrlr->doorbell_stride_u32 = 1 << cap.bits.dstrd;

	rc = nvme_pcie_ctrlr_construct_admin_qpair(&pctrlr->ctrlr, pctrlr->ctrlr.opts.admin_queue_size);
	if (rc != 0) {
		nvme_ctrlr_destruct(&pctrlr->ctrlr);
		return NULL;
	}

	/* Construct the primary process properties */
	rc = nvme_ctrlr_add_process(&pctrlr->ctrlr, pci_dev);
	if (rc != 0) {
		nvme_ctrlr_destruct(&pctrlr->ctrlr);
		return NULL;
	}

	if (!g_sigset) {
		spdk_pci_register_error_handler(nvme_sigbus_fault_sighandler,
						NULL);
		g_sigset = true;
	}

	return &pctrlr->ctrlr;
}

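/*
 * Program the admin queue registers ahead of controller enable: ASQ/ACQ
 * take the DMA addresses of the admin submission and completion queues,
 * and AQA takes their sizes (as 0-based values).
 */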
static int
nvme_pcie_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	struct nvme_pcie_qpair *padminq = nvme_pcie_qpair(ctrlr->adminq);
	union spdk_nvme_aqa_register aqa;

	if (nvme_pcie_ctrlr_set_asq(pctrlr, padminq->cmd_bus_addr)) {
		SPDK_ERRLOG("set_asq() failed\n");
		return -EIO;
	}

	if (nvme_pcie_ctrlr_set_acq(pctrlr, padminq->cpl_bus_addr)) {
		SPDK_ERRLOG("set_acq() failed\n");
		return -EIO;
	}

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = nvme_pcie_qpair(ctrlr->adminq)->num_entries - 1;
	aqa.bits.asqs = nvme_pcie_qpair(ctrlr->adminq)->num_entries - 1;

	if (nvme_pcie_ctrlr_set_aqa(pctrlr, &aqa)) {
		SPDK_ERRLOG("set_aqa() failed\n");
		return -EIO;
	}

	return 0;
}

static int
nvme_pcie_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	struct spdk_pci_device *devhandle = nvme_ctrlr_proc_get_devhandle(ctrlr);

	if (ctrlr->adminq) {
		nvme_pcie_qpair_destroy(ctrlr->adminq);
	}

	nvme_ctrlr_destruct_finish(ctrlr);

	nvme_pcie_ctrlr_free_bars(pctrlr);

	if (devhandle) {
		if (ctrlr->opts.enable_interrupts) {
			spdk_pci_device_disable_interrupts(devhandle);
		}
		spdk_pci_device_unclaim(devhandle);
		spdk_pci_device_detach(devhandle);
	}

	spdk_free(pctrlr);

	return 0;
}

static int
nvme_pcie_ctrlr_enable_interrupts(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_pci_device *devhandle = nvme_ctrlr_proc_get_devhandle(ctrlr);
	int rc;

	assert(devhandle != NULL);
	rc = spdk_pci_device_enable_interrupts(devhandle, ctrlr->opts.num_io_queues);
	if (rc) {
		SPDK_ERRLOG("enable_interrupts() failed\n");
		return -EIO;
	}

	return 0;
}

static int
nvme_pcie_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				 int (*iter_fn)(struct nvme_request *req, void *arg),
				 void *arg)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct nvme_tracker *tr, *tmp;
	int rc;

	assert(iter_fn != NULL);

	TAILQ_FOREACH_SAFE(tr, &pqpair->outstanding_tr, tq_list, tmp) {
		assert(tr->req != NULL);

		rc = iter_fn(tr->req, arg);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

void
spdk_nvme_pcie_set_hotplug_filter(spdk_nvme_pcie_hotplug_filter_cb filter_cb)
{
	g_hotplug_filter_cb = filter_cb;
}

static struct spdk_pci_id nvme_pci_driver_id[] = {
	{
		.class_id = SPDK_PCI_CLASS_NVME,
		.vendor_id = SPDK_PCI_ANY_ID,
		.device_id = SPDK_PCI_ANY_ID,
		.subvendor_id = SPDK_PCI_ANY_ID,
		.subdevice_id = SPDK_PCI_ANY_ID,
	},
	{ .vendor_id = 0, /* sentinel */ },
};

SPDK_PCI_DRIVER_REGISTER(nvme, nvme_pci_driver_id,
			 SPDK_PCI_DRIVER_NEED_MAPPING | SPDK_PCI_DRIVER_WC_ACTIVATE);

const struct spdk_nvme_transport_ops pcie_ops = {
	.name = "PCIE",
	.type = SPDK_NVME_TRANSPORT_PCIE,
	.ctrlr_construct = nvme_pcie_ctrlr_construct,
	.ctrlr_scan = nvme_pcie_ctrlr_scan,
	.ctrlr_scan_attached = nvme_pci_ctrlr_scan_attached,
	.ctrlr_destruct = nvme_pcie_ctrlr_destruct,
	.ctrlr_enable = nvme_pcie_ctrlr_enable,
	.ctrlr_enable_interrupts = nvme_pcie_ctrlr_enable_interrupts,

	.ctrlr_get_registers = nvme_pcie_ctrlr_get_registers,
	.ctrlr_set_reg_4 = nvme_pcie_ctrlr_set_reg_4,
	.ctrlr_set_reg_8 = nvme_pcie_ctrlr_set_reg_8,
	.ctrlr_get_reg_4 = nvme_pcie_ctrlr_get_reg_4,
	.ctrlr_get_reg_8 = nvme_pcie_ctrlr_get_reg_8,

	.ctrlr_get_max_xfer_size = nvme_pcie_ctrlr_get_max_xfer_size,
	.ctrlr_get_max_sges = nvme_pcie_ctrlr_get_max_sges,

	.ctrlr_reserve_cmb = nvme_pcie_ctrlr_reserve_cmb,
	.ctrlr_map_cmb = nvme_pcie_ctrlr_map_io_cmb,
	.ctrlr_unmap_cmb = nvme_pcie_ctrlr_unmap_io_cmb,

	.ctrlr_enable_pmr = nvme_pcie_ctrlr_enable_pmr,
	.ctrlr_disable_pmr = nvme_pcie_ctrlr_disable_pmr,
	.ctrlr_map_pmr = nvme_pcie_ctrlr_map_io_pmr,
	.ctrlr_unmap_pmr = nvme_pcie_ctrlr_unmap_io_pmr,

	.ctrlr_create_io_qpair = nvme_pcie_ctrlr_create_io_qpair,
	.ctrlr_delete_io_qpair = nvme_pcie_ctrlr_delete_io_qpair,
	.ctrlr_connect_qpair = nvme_pcie_ctrlr_connect_qpair,
	.ctrlr_disconnect_qpair = nvme_pcie_ctrlr_disconnect_qpair,

	.qpair_abort_reqs = nvme_pcie_qpair_abort_reqs,
	.qpair_reset = nvme_pcie_qpair_reset,
	.qpair_submit_request = nvme_pcie_qpair_submit_request,
	.qpair_process_completions = nvme_pcie_qpair_process_completions,
	.qpair_iterate_requests = nvme_pcie_qpair_iterate_requests,
	.qpair_get_fd = nvme_pcie_qpair_get_fd,
	.admin_qpair_abort_aers = nvme_pcie_admin_qpair_abort_aers,

	.poll_group_create = nvme_pcie_poll_group_create,
	.poll_group_connect_qpair = nvme_pcie_poll_group_connect_qpair,
	.poll_group_disconnect_qpair = nvme_pcie_poll_group_disconnect_qpair,
	.poll_group_add = nvme_pcie_poll_group_add,
	.poll_group_remove = nvme_pcie_poll_group_remove,
	.poll_group_process_completions = nvme_pcie_poll_group_process_completions,
	.poll_group_check_disconnected_qpairs = nvme_pcie_poll_group_check_disconnected_qpairs,
	.poll_group_destroy = nvme_pcie_poll_group_destroy,
	.poll_group_get_stats = nvme_pcie_poll_group_get_stats,
	.poll_group_free_stats = nvme_pcie_poll_group_free_stats
};

SPDK_NVME_TRANSPORT_REGISTER(pcie, &pcie_ops);