2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2015 Netflix, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
35 #include <sys/endian.h>
36 #include <sys/systm.h>
37 #include <sys/types.h>
38 #include <sys/malloc.h>
39 #include <sys/kernel.h>
42 #include <sys/fcntl.h>
46 #include <sys/mutex.h>
47 #include <sys/sysctl.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_queue.h>
52 #include <cam/cam_periph.h>
53 #include <cam/cam_sim.h>
54 #include <cam/cam_xpt.h>
55 #include <cam/cam_xpt_sim.h>
56 #include <cam/cam_xpt_periph.h>
57 #include <cam/cam_xpt_internal.h>
58 #include <cam/cam_debug.h>
60 #include <cam/scsi/scsi_all.h>
61 #include <cam/scsi/scsi_message.h>
62 #include <cam/nvme/nvme_all.h>
63 #include <machine/stdarg.h> /* for xpt_print below */
/*
 * Per-device quirk record, matched against IDENTIFY data to tune behavior.
 * NOTE(review): the member declarations between the open brace and the
 * CAM_QUIRK_MAXTAGS define are not visible in this chunk — the quirk table
 * below initializes .quirks/.mintags/.maxtags, so those are presumably the
 * fields; confirm against the full file.
 */
66 struct nvme_quirk_entry {
68 #define CAM_QUIRK_MAXTAGS 1
73 /* Not even sure why we need this */
/* Init hook for the probe periph driver (registered below). */
74 static periph_init_t nvme_probe_periph_init;
/*
 * CAM peripheral driver descriptor for the transient "nvme_probe" periph
 * that interrogates newly discovered NVMe namespaces.
 */
76 static struct periph_driver nvme_probe_driver =
78 nvme_probe_periph_init, "nvme_probe",
79 TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0,
83 PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver);
/*
 * Probe state machine states: fetch IDENTIFY CONTROLLER data first, then
 * IDENTIFY NAMESPACE data (other states are not visible in this chunk).
 */
86 NVME_PROBE_IDENTIFY_CD,
87 NVME_PROBE_IDENTIFY_NS,
/* Human-readable names, indexed by state for CAM_DEBUG_PROBE traces. */
92 static char *nvme_probe_action_text[] = {
93 "NVME_PROBE_IDENTIFY_CD",
94 "NVME_PROBE_IDENTIFY_NS",
/*
 * Transition the probe state machine to newaction, logging the old->new
 * state names under CAM_DEBUG_PROBE before storing the new state.
 */
99 #define NVME_PROBE_SET_ACTION(softc, newaction) \
102 text = nvme_probe_action_text; \
103 CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE, \
104 ("Probe %s to %s\n", text[(softc)->action], \
105 text[(newaction)])); \
106 (softc)->action = (newaction); \
/* Flag: suppress device announcement (set on CAM_EXPECT_INQ_CHANGE rescans). */
110 NVME_PROBE_NO_ANNOUNCE = 0x04
/*
 * Soft state for one nvme_probe periph instance: the queue of requesting
 * CCBs, staging buffers for IDENTIFY controller/namespace data, the current
 * state-machine action/flags, and a backpointer to the owning periph.
 */
114 TAILQ_HEAD(, ccb_hdr) request_ccbs;
116 struct nvme_controller_data cd;
117 struct nvme_namespace_data ns;
119 nvme_probe_action action;
120 nvme_probe_flags flags;
122 struct cam_periph *periph;
/*
 * Quirk table; the last entry is the wildcard default used by
 * nvme_alloc_device() until IDENTIFY data allows a better match.
 */
125 static struct nvme_quirk_entry nvme_quirk_table[] =
129 // T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
130 // /*vendor*/"*", /*product*/"*", /*revision*/"*"
132 .quirks = 0, .mintags = 0, .maxtags = 0
/* Element count of the table above (used for wildcard-entry lookup). */
136 static const int nvme_quirk_table_size =
137 sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table);
/* Forward declarations for the probe periph and NVMe transport glue. */
139 static cam_status nvme_probe_register(struct cam_periph *periph,
141 static void nvme_probe_schedule(struct cam_periph *nvme_probe_periph);
142 static void nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb);
143 static void nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb);
144 static void nvme_probe_cleanup(struct cam_periph *periph);
145 //static void nvme_find_quirk(struct cam_ed *device);
146 static void nvme_scan_lun(struct cam_periph *periph,
147 struct cam_path *path, cam_flags flags,
149 static struct cam_ed *
150 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target,
152 static void nvme_device_transport(struct cam_path *path);
153 static void nvme_dev_async(uint32_t async_code,
155 struct cam_et *target,
156 struct cam_ed *device,
158 static void nvme_action(union ccb *start_ccb);
159 static void nvme_announce_periph_sbuf(struct cam_periph *periph,
161 static void nvme_proto_announce_sbuf(struct cam_ed *device,
163 static void nvme_proto_denounce_sbuf(struct cam_ed *device,
165 static void nvme_proto_debug_out(union ccb *ccb);
/* Transport method table: device allocation, CCB action, async events,
 * and periph announcement for the NVMe transport. */
167 static struct xpt_xport_ops nvme_xport_ops = {
168 .alloc_device = nvme_alloc_device,
169 .action = nvme_action,
170 .async = nvme_dev_async,
171 .announce_sbuf = nvme_announce_periph_sbuf,
/*
 * Instantiate and register an xpt_xport for a given XPORT_* constant,
 * sharing nvme_xport_ops across all instances.
 */
173 #define NVME_XPT_XPORT(x, X) \
174 static struct xpt_xport nvme_xport_ ## x = { \
175 .xport = XPORT_ ## X, \
177 .ops = &nvme_xport_ops, \
179 CAM_XPT_XPORT(nvme_xport_ ## x);
181 NVME_XPT_XPORT(nvme, NVME);
183 #undef NVME_XPT_XPORT
/* Protocol method table: announce/denounce identification and CDB debug. */
185 static struct xpt_proto_ops nvme_proto_ops = {
186 .announce_sbuf = nvme_proto_announce_sbuf,
187 .denounce_sbuf = nvme_proto_denounce_sbuf,
188 .debug_out = nvme_proto_debug_out,
190 static struct xpt_proto nvme_proto = {
193 .ops = &nvme_proto_ops,
195 CAM_XPT_PROTO(nvme_proto);
198 nvme_probe_periph_init(void)
/*
 * Periph constructor: validate the probe request CCB passed in arg,
 * allocate and initialize the softc, take a periph reference, and kick
 * off the probe state machine via nvme_probe_schedule().
 * Returns CAM_REQ_CMP_ERR on missing CCB, allocation failure, or if the
 * periph reference cannot be acquired.
 */
203 nvme_probe_register(struct cam_periph *periph, void *arg)
205 union ccb *request_ccb;	/* CCB representing the probe request */
206 nvme_probe_softc *softc;
208 request_ccb = (union ccb *)arg;
209 if (request_ccb == NULL) {
210 printf("nvme_probe_register: no probe CCB, "
211 "can't register device\n");
212 return(CAM_REQ_CMP_ERR);
/* M_NOWAIT: may run in a context where sleeping is not allowed. */
215 softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT);
218 printf("nvme_probe_register: Unable to probe new device. "
219 "Unable to allocate softc\n");
220 return(CAM_REQ_CMP_ERR);
/* Queue the requesting CCB; more may be appended by nvme_scan_lun(). */
222 TAILQ_INIT(&softc->request_ccbs);
223 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
226 periph->softc = softc;
227 softc->periph = periph;
228 softc->action = NVME_PROBE_INVALID;
229 if (cam_periph_acquire(periph) != 0)
230 return (CAM_REQ_CMP_ERR);
232 CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));
234 // nvme_device_transport(periph->path);
235 nvme_probe_schedule(periph);
/*
 * (Re)arm the probe state machine: reset to IDENTIFY_CD, derive the
 * announce-suppression flag from the first queued request CCB, and
 * schedule the periph at XPT priority.
 */
241 nvme_probe_schedule(struct cam_periph *periph)
244 nvme_probe_softc *softc;
246 softc = (nvme_probe_softc *)periph->softc;
247 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
249 NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);
/* Rescans that expect inquiry-data changes skip re-announcement. */
251 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
252 softc->flags |= NVME_PROBE_NO_ANNOUNCE;
254 softc->flags &= ~NVME_PROBE_NO_ANNOUNCE;
256 xpt_schedule(periph, CAM_PRIORITY_XPT);
/*
 * Periph start routine: build the admin IDENTIFY command for the current
 * state (controller data for IDENTIFY_CD, namespace data for IDENTIFY_NS,
 * using the path's LUN id as the namespace id) into start_ccb and issue
 * it with CAM_DEV_QFREEZE set so completion ordering is preserved.
 */
260 nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb)
262 struct ccb_nvmeio *nvmeio;
263 nvme_probe_softc *softc;
266 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n"));
268 softc = (nvme_probe_softc *)periph->softc;
269 nvmeio = &start_ccb->nvmeio;
270 lun = xpt_path_lun_id(periph->path);
/* A restart request rewinds the state machine to the first step. */
272 if (softc->restart) {
274 NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);
277 switch (softc->action) {
278 case NVME_PROBE_IDENTIFY_CD:
279 cam_fill_nvmeadmin(nvmeio,
281 nvme_probe_done, /* cbfcnp */
282 CAM_DIR_IN, /* flags */
283 (uint8_t *)&softc->cd, /* data_ptr */
284 sizeof(softc->cd), /* dxfer_len */
285 30 * 1000); /* timeout 30s */
/* nsid 0: IDENTIFY CONTROLLER variant. */
286 nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, 0,
289 case NVME_PROBE_IDENTIFY_NS:
290 cam_fill_nvmeadmin(nvmeio,
292 nvme_probe_done, /* cbfcnp */
293 CAM_DIR_IN, /* flags */
294 (uint8_t *)&softc->ns, /* data_ptr */
295 sizeof(softc->ns), /* dxfer_len */
296 30 * 1000); /* timeout 30s */
/* nsid = lun: IDENTIFY NAMESPACE for this path's namespace. */
297 nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, lun,
301 panic("nvme_probe_start: invalid action state 0x%x\n", softc->action);
303 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
304 xpt_action(start_ccb);
/*
 * Completion handler for the probe IDENTIFY commands.  On error the device
 * is marked lost (device_fail).  On success for IDENTIFY_CD the controller
 * data is byte-swapped, cached on the device, and the serial number saved;
 * for IDENTIFY_NS the namespace data is cached and a SCSI VPD device-id
 * page is synthesized from NGUID/EUI64, then the device is announced.
 * Finally, all queued request CCBs are completed and the periph destroyed.
 */
308 nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb)
310 struct nvme_namespace_data *nvme_data;
311 struct nvme_controller_data *nvme_cdata;
312 nvme_probe_softc *softc;
313 struct cam_path *path;
314 struct scsi_vpd_device_id *did;
315 struct scsi_vpd_id_descriptor *idd;
317 int found = 1, e, g, len;
319 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_done\n"));
321 softc = (nvme_probe_softc *)periph->softc;
322 path = done_ccb->ccb_h.path;
323 priority = done_ccb->ccb_h.pinfo.priority;
/* Error path: let cam_periph_error() decide on retry; on restart,
 * suppress recovery/retry since we will re-run the whole probe. */
325 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
326 if (cam_periph_error(done_ccb,
327 0, softc->restart ? (SF_NO_RECOVERY | SF_NO_RETRY) : 0
330 /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
331 cam_release_devq(path, 0, 0, 0, FALSE);
334 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
335 /* Don't wedge the queue */
336 xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
340 * If we get to this point, we got an error status back
341 * from the inquiry and the error status doesn't require
342 * automatically retrying the command. Therefore, the
343 * inquiry failed. If we had inquiry information before
344 * for this device, but this latest inquiry command failed,
345 * the device has probably gone away. If this device isn't
346 * already marked unconfigured, notify the peripheral
347 * drivers that this device is no more.
349 device_fail: if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
350 xpt_async(AC_LOST_DEVICE, path, NULL);
351 NVME_PROBE_SET_ACTION(softc, NVME_PROBE_INVALID);
357 switch (softc->action) {
358 case NVME_PROBE_IDENTIFY_CD:
/* IDENTIFY data arrives little-endian; swap to host order in place. */
359 nvme_controller_data_swapbytes(&softc->cd);
361 nvme_cdata = path->device->nvme_cdata;
362 if (nvme_cdata == NULL) {
363 nvme_cdata = malloc(sizeof(*nvme_cdata), M_CAMXPT,
365 if (nvme_cdata == NULL) {
366 xpt_print(path, "Can't allocate memory");
370 bcopy(&softc->cd, nvme_cdata, sizeof(*nvme_cdata));
371 path->device->nvme_cdata = nvme_cdata;
373 /* Save/update serial number. */
374 if (path->device->serial_num != NULL) {
375 free(path->device->serial_num, M_CAMXPT);
376 path->device->serial_num = NULL;
377 path->device->serial_num_len = 0;
379 path->device->serial_num = (uint8_t *)
380 malloc(NVME_SERIAL_NUMBER_LENGTH + 1, M_CAMXPT, M_NOWAIT);
381 if (path->device->serial_num != NULL) {
/* Sanitize non-ASCII bytes to spaces for display purposes. */
382 cam_strvis_flag(path->device->serial_num,
383 nvme_cdata->sn, sizeof(nvme_cdata->sn),
384 NVME_SERIAL_NUMBER_LENGTH + 1,
385 CAM_STRVIS_FLAG_NONASCII_SPC);
387 path->device->serial_num_len =
388 strlen(path->device->serial_num);
391 // nvme_find_quirk(path->device);
392 nvme_device_transport(path);
/* Advance to the namespace-identify step and reschedule. */
393 NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_NS);
394 xpt_release_ccb(done_ccb);
395 xpt_schedule(periph, priority);
397 case NVME_PROBE_IDENTIFY_NS:
398 nvme_namespace_data_swapbytes(&softc->ns);
400 /* Check that the namespace exists. */
401 if (softc->ns.nsze == 0)
404 nvme_data = path->device->nvme_data;
405 if (nvme_data == NULL) {
406 nvme_data = malloc(sizeof(*nvme_data), M_CAMXPT,
408 if (nvme_data == NULL) {
409 xpt_print(path, "Can't allocate memory");
413 bcopy(&softc->ns, nvme_data, sizeof(*nvme_data));
414 path->device->nvme_data = nvme_data;
416 /* Save/update device_id based on NGUID and/or EUI64. */
417 if (path->device->device_id != NULL) {
418 free(path->device->device_id, M_CAMXPT);
419 path->device->device_id = NULL;
420 path->device->device_id_len = 0;
/* Scan NGUID/EUI64 for any nonzero byte; only non-all-zero
 * identifiers get a VPD descriptor (g/e mark the first hit). */
423 for (g = 0; g < sizeof(nvme_data->nguid); g++) {
424 if (nvme_data->nguid[g] != 0)
427 if (g < sizeof(nvme_data->nguid))
428 len += sizeof(struct scsi_vpd_id_descriptor) + 16;
429 for (e = 0; e < sizeof(nvme_data->eui64); e++) {
430 if (nvme_data->eui64[e] != 0)
433 if (e < sizeof(nvme_data->eui64))
434 len += sizeof(struct scsi_vpd_id_descriptor) + 8;
436 path->device->device_id = (uint8_t *)
437 malloc(SVPD_DEVICE_ID_HDR_LEN + len,
440 if (path->device->device_id != NULL) {
/* Build a SCSI VPD page 0x83 image in the device_id buffer. */
441 did = (struct scsi_vpd_device_id *)path->device->device_id;
442 did->device = SID_QUAL_LU_CONNECTED | T_DIRECT;
443 did->page_code = SVPD_DEVICE_ID;
444 scsi_ulto2b(len, did->length);
445 idd = (struct scsi_vpd_id_descriptor *)(did + 1);
446 if (g < sizeof(nvme_data->nguid)) {
447 idd->proto_codeset = SVPD_ID_CODESET_BINARY;
448 idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
450 bcopy(nvme_data->nguid, idd->identifier, 16);
451 idd = (struct scsi_vpd_id_descriptor *)
452 &idd->identifier[16];
454 if (e < sizeof(nvme_data->eui64)) {
455 idd->proto_codeset = SVPD_ID_CODESET_BINARY;
456 idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
458 bcopy(nvme_data->eui64, idd->identifier, 8);
460 path->device->device_id_len = SVPD_DEVICE_ID_HDR_LEN + len;
/* First sighting: mark configured and announce to periph drivers. */
463 if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
464 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
465 xpt_acquire_device(path->device);
466 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
467 xpt_action(done_ccb);
468 xpt_async(AC_FOUND_DEVICE, path, done_ccb);
470 NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE);
473 panic("nvme_probe_done: invalid action state 0x%x\n", softc->action);
/* A rescan arrived while we were probing: start over. */
476 if (softc->restart) {
478 xpt_release_ccb(done_ccb);
479 nvme_probe_schedule(periph);
482 xpt_release_ccb(done_ccb);
483 CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
/* Complete every queued request CCB with the probe outcome. */
484 while ((done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) {
485 TAILQ_REMOVE(&softc->request_ccbs,
486 &done_ccb->ccb_h, periph_links.tqe);
487 done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR;
490 /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
491 cam_release_devq(path, 0, 0, 0, FALSE);
/* The probe periph is single-use: tear it down now. */
492 cam_periph_invalidate(periph);
493 cam_periph_release_locked(periph);
/* Periph destructor: release the softc allocated in nvme_probe_register(). */
497 nvme_probe_cleanup(struct cam_periph *periph)
500 free(periph->softc, M_CAMXPT);
504 /* XXX should be used, don't delete */
/*
 * Match the device's IDENTIFY data against nvme_quirk_table and apply the
 * matching entry (currently unreferenced — see the commented-out call in
 * nvme_probe_done()).  Panics if even the wildcard entry fails to match.
 */
506 nvme_find_quirk(struct cam_ed *device)
508 struct nvme_quirk_entry *quirk;
511 match = cam_quirkmatch((caddr_t)&device->nvme_data,
512 (caddr_t)nvme_quirk_table,
513 nvme_quirk_table_size,
514 sizeof(*nvme_quirk_table), nvme_identify_match);
517 panic("xpt_find_quirk: device didn't match wildcard entry!!");
519 quirk = (struct nvme_quirk_entry *)match;
520 device->quirk = quirk;
/* Only override tag limits when the quirk explicitly sets them. */
521 if (quirk->quirks & CAM_QUIRK_MAXTAGS) {
522 device->mintags = quirk->mintags;
523 device->maxtags = quirk->maxtags;
/*
 * Transport scan entry point for a single LUN (namespace).  Validates the
 * path, ignores wildcard-LUN (bus-level) scans, and either piggybacks the
 * request on an existing nvme_probe periph (restarting it) or allocates a
 * fresh one via cam_periph_alloc().
 */
529 nvme_scan_lun(struct cam_periph *periph, struct cam_path *path,
530 cam_flags flags, union ccb *request_ccb)
532 struct ccb_pathinq cpi;
534 struct cam_periph *old_periph;
537 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n"));
539 xpt_path_inq(&cpi, path);
/* Propagate path-inquiry failure straight back to the requester. */
541 if (cpi.ccb_h.status != CAM_REQ_CMP) {
542 if (request_ccb != NULL) {
543 request_ccb->ccb_h.status = cpi.ccb_h.status;
544 xpt_done(request_ccb);
/* Bus-wide (wildcard LUN) scans are not meaningful for NVMe; succeed. */
549 if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) {
550 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n"));
551 request_ccb->ccb_h.status = CAM_REQ_CMP; /* XXX signal error ? */
552 xpt_done(request_ccb);
/* Take the path lock only if the caller does not already own it. */
556 lock = (xpt_path_owned(path) == 0);
/* A probe periph already exists: queue onto it and restart it, unless
 * it is being invalidated, in which case fail the request. */
559 if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) {
560 if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
561 nvme_probe_softc *softc;
563 softc = (nvme_probe_softc *)old_periph->softc;
564 TAILQ_INSERT_TAIL(&softc->request_ccbs,
565 &request_ccb->ccb_h, periph_links.tqe);
567 CAM_DEBUG(path, CAM_DEBUG_TRACE,
568 ("restarting nvme_probe device\n"));
570 request_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
571 CAM_DEBUG(path, CAM_DEBUG_TRACE,
572 ("Failing to restart nvme_probe device\n"));
573 xpt_done(request_ccb);
576 CAM_DEBUG(path, CAM_DEBUG_TRACE,
577 ("Adding nvme_probe device\n"));
578 status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup,
579 nvme_probe_start, "nvme_probe",
581 request_ccb->ccb_h.path, NULL, 0,
584 if (status != CAM_REQ_CMP) {
585 xpt_print(path, "xpt_scan_lun: cam_alloc_periph "
586 "returned an error, can't continue probe\n");
587 request_ccb->ccb_h.status = status;
588 xpt_done(request_ccb);
592 xpt_path_unlock(path);
/*
 * Transport alloc_device hook: create a cam_ed via xpt_alloc_device() and
 * initialize it with the wildcard quirk entry and empty identification
 * fields until probe supplies real IDENTIFY data.
 */
595 static struct cam_ed *
596 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
598 struct nvme_quirk_entry *quirk;
599 struct cam_ed *device;
601 device = xpt_alloc_device(bus, target, lun_id);
606 * Take the default quirk entry until we have inquiry
607 * data from nvme and can determine a better quirk to use.
609 quirk = &nvme_quirk_table[nvme_quirk_table_size - 1];
610 device->quirk = (void *)quirk;
613 device->inq_flags = 0;
614 device->queue_flags = 0;
615 device->device_id = NULL;
616 device->device_id_len = 0;
617 device->serial_num = NULL;
618 device->serial_num_len = 0;
/*
 * Copy transport/protocol identity reported by the SIM (via XPT_PATH_INQ)
 * onto the device, then echo the current settings back to the controller
 * with an XPT_SET_TRAN_SETTINGS CCB.
 */
623 nvme_device_transport(struct cam_path *path)
625 struct ccb_pathinq cpi;
626 struct ccb_trans_settings cts;
627 /* XXX get data from nvme namespace and other info ??? */
629 /* Get transport information from the SIM */
630 xpt_path_inq(&cpi, path);
632 path->device->transport = cpi.transport;
633 path->device->transport_version = cpi.transport_version;
635 path->device->protocol = cpi.protocol;
636 path->device->protocol_version = cpi.protocol_version;
638 /* Tell the controller what we think */
639 memset(&cts, 0, sizeof(cts));
640 xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
641 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
642 cts.type = CTS_TYPE_CURRENT_SETTINGS;
643 cts.transport = path->device->transport;
644 cts.transport_version = path->device->transport_version;
645 cts.protocol = path->device->protocol;
646 cts.protocol_version = path->device->protocol_version;
647 cts.proto_specific.valid = 0;
648 cts.xport_specific.valid = 0;
649 xpt_action((union ccb *)&cts);
/*
 * Handle XPT_DEV_ADVINFO: fetch (and, for PHYS_PATH, store) advanced device
 * info.  Supported buftypes: SCSI device-id VPD image, serial number,
 * physical path, and the cached NVMe controller/namespace IDENTIFY data.
 * Copies are truncated to cdai->bufsiz; provsiz reports the full size.
 */
653 nvme_dev_advinfo(union ccb *start_ccb)
655 struct cam_ed *device;
656 struct ccb_dev_advinfo *cdai;
/* Caller must hold the path lock; status defaults to invalid. */
659 xpt_path_assert(start_ccb->ccb_h.path, MA_OWNED);
660 start_ccb->ccb_h.status = CAM_REQ_INVALID;
661 device = start_ccb->ccb_h.path->device;
662 cdai = &start_ccb->cdai;
663 switch(cdai->buftype) {
664 case CDAI_TYPE_SCSI_DEVID:
/* Read-only type: a STORE request falls through to invalid status. */
665 if (cdai->flags & CDAI_FLAG_STORE)
667 cdai->provsiz = device->device_id_len;
668 if (device->device_id_len == 0)
670 amt = device->device_id_len;
671 if (cdai->provsiz > cdai->bufsiz)
673 memcpy(cdai->buf, device->device_id, amt);
675 case CDAI_TYPE_SERIAL_NUM:
676 if (cdai->flags & CDAI_FLAG_STORE)
678 cdai->provsiz = device->serial_num_len;
679 if (device->serial_num_len == 0)
681 amt = device->serial_num_len;
682 if (cdai->provsiz > cdai->bufsiz)
684 memcpy(cdai->buf, device->serial_num, amt);
686 case CDAI_TYPE_PHYS_PATH:
/* PHYS_PATH is the only writable type: replace the stored path. */
687 if (cdai->flags & CDAI_FLAG_STORE) {
688 if (device->physpath != NULL) {
689 free(device->physpath, M_CAMXPT);
690 device->physpath = NULL;
691 device->physpath_len = 0;
693 /* Clear existing buffer if zero length */
694 if (cdai->bufsiz == 0)
696 device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
697 if (device->physpath == NULL) {
698 start_ccb->ccb_h.status = CAM_REQ_ABORTED;
701 device->physpath_len = cdai->bufsiz;
702 memcpy(device->physpath, cdai->buf, cdai->bufsiz);
704 cdai->provsiz = device->physpath_len;
705 if (device->physpath_len == 0)
707 amt = device->physpath_len;
708 if (cdai->provsiz > cdai->bufsiz)
710 memcpy(cdai->buf, device->physpath, amt);
713 case CDAI_TYPE_NVME_CNTRL:
714 if (cdai->flags & CDAI_FLAG_STORE)
716 amt = sizeof(struct nvme_controller_data);
718 if (amt > cdai->bufsiz)
720 memcpy(cdai->buf, device->nvme_cdata, amt);
722 case CDAI_TYPE_NVME_NS:
723 if (cdai->flags & CDAI_FLAG_STORE)
725 amt = sizeof(struct nvme_namespace_data);
727 if (amt > cdai->bufsiz)
729 memcpy(cdai->buf, device->nvme_data, amt);
734 start_ccb->ccb_h.status = CAM_REQ_CMP;
/* Successful stores notify listeners that advinfo changed. */
736 if (cdai->flags & CDAI_FLAG_STORE) {
737 xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
738 (void *)(uintptr_t)cdai->buftype);
/*
 * Transport action dispatch: handle LUN scans and XPT_DEV_ADVINFO locally,
 * defer everything else to xpt_action_default().
 */
743 nvme_action(union ccb *start_ccb)
745 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
746 ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code));
748 switch (start_ccb->ccb_h.func_code) {
752 nvme_scan_lun(start_ccb->ccb_h.path->periph,
753 start_ccb->ccb_h.path, start_ccb->crcn.flags,
756 case XPT_DEV_ADVINFO:
757 nvme_dev_advinfo(start_ccb);
761 xpt_action_default(start_ccb);
767 * Handle any per-device event notifications that require action by the XPT.
770 nvme_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target,
771 struct cam_ed *device, void *async_arg)
775 * We only need to handle events for real devices.
/* Skip wildcard (non-device) paths. */
777 if (target->target_id == CAM_TARGET_WILDCARD
778 || device->lun_id == CAM_LUN_WILDCARD)
/* On device loss, mark unconfigured and drop the reference taken when
 * the device was announced. */
781 if (async_code == AC_LOST_DEVICE &&
782 (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
783 device->flags |= CAM_DEV_UNCONFIGURED;
784 xpt_release_device(device);
/*
 * Format the periph announcement line into sb: NVMe protocol version from
 * the SIM's current transfer settings, plus PCIe lane/speed details when
 * the transport reports a valid link.  Returns silently if the settings
 * query fails.
 */
789 nvme_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
791 struct ccb_pathinq cpi;
792 struct ccb_trans_settings cts;
793 struct cam_path *path = periph->path;
794 struct ccb_trans_settings_nvme *nvmex;
796 cam_periph_assert(periph, MA_OWNED);
798 /* Ask the SIM for connection details */
799 memset(&cts, 0, sizeof(cts));
800 xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
801 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
802 cts.type = CTS_TYPE_CURRENT_SETTINGS;
803 xpt_action((union ccb*)&cts);
804 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
807 /* Ask the SIM for its base transfer speed */
808 xpt_path_inq(&cpi, periph->path);
809 sbuf_printf(sb, "%s%d: nvme version %d.%d",
810 periph->periph_name, periph->unit_number,
811 NVME_MAJOR(cts.protocol_version),
812 NVME_MINOR(cts.protocol_version));
813 if (cts.transport == XPORT_NVME) {
814 nvmex = &cts.proto_specific.nvme;
/* PCIe link geometry is optional; only print when valid. */
815 if (nvmex->valid & CTS_NVME_VALID_LINK)
817 " x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link",
818 nvmex->lanes, nvmex->max_lanes,
819 nvmex->speed, nvmex->max_speed);
821 sbuf_printf(sb, "\n");
/* Protocol announce hook: print full device identity from cached IDENTIFY data. */
825 nvme_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb)
827 nvme_print_ident(device->nvme_cdata, device->nvme_data, sb);
/* Protocol denounce hook: short-form identity used when a device departs. */
831 nvme_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb)
833 nvme_print_ident_short(device->nvme_cdata, device->nvme_data, sb);
837 nvme_proto_debug_out(union ccb *ccb)
839 char cdb_str[(sizeof(struct nvme_command) * 3) + 1];
841 if (ccb->ccb_h.func_code != XPT_NVME_IO &&
842 ccb->ccb_h.func_code != XPT_NVME_ADMIN)
845 CAM_DEBUG(ccb->ccb_h.path,
846 CAM_DEBUG_CDB,("%s. NCB: %s\n", nvme_op_string(&ccb->nvmeio.cmd,
847 ccb->ccb_h.func_code == XPT_NVME_ADMIN),
848 nvme_cmd_string(&ccb->nvmeio.cmd, cdb_str, sizeof(cdb_str))));