/*-
 * Copyright (c) 2016 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
29 #include <sys/param.h>
30 #include <sys/systm.h>
34 #include <sys/ioccom.h>
35 #include <sys/malloc.h>
40 #include <cam/cam_ccb.h>
41 #include <cam/cam_sim.h>
42 #include <cam/cam_xpt_sim.h>
43 #include <cam/cam_debug.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pcireg.h>
48 #include "nvme_private.h"
/* Stash our pointers in the CCB's SIM-private fields. */
#define ccb_accb_ptr	spriv_ptr0
#define ccb_ctrlr_ptr	spriv_ptr1

static void	nvme_sim_action(struct cam_sim *sim, union ccb *ccb);
static void	nvme_sim_poll(struct cam_sim *sim);

/* Recover our per-SIM state / controller from a cam_sim. */
#define sim2softc(sim)	((struct nvme_sim_softc *)cam_sim_softc(sim))
#define sim2ctrlr(sim)	(sim2softc(sim)->s_ctrlr)

/*
 * Per-controller state for the CAM SIM front end: the controller we
 * front, the SIM we registered and the wildcard path for async events.
 * NOTE(review): the struct's open/close lines were lost in extraction;
 * reconstructed around the visible members.
 */
struct nvme_sim_softc {
	struct nvme_controller	*s_ctrlr;
	struct cam_sim		*s_sim;
	struct cam_path		*s_path;
};
66 nvme_sim_nvmeio_done(void *ccb_arg, const struct nvme_completion *cpl)
68 union ccb *ccb = (union ccb *)ccb_arg;
71 * Let the periph know the completion, and let it sort out what
72 * it means. Make our best guess, though for the status code.
74 memcpy(&ccb->nvmeio.cpl, cpl, sizeof(*cpl));
75 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
76 if (nvme_completion_is_error(cpl)) {
77 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
80 ccb->ccb_h.status = CAM_REQ_CMP;
86 nvme_sim_nvmeio(struct cam_sim *sim, union ccb *ccb)
88 struct ccb_nvmeio *nvmeio = &ccb->nvmeio;
89 struct nvme_request *req;
92 struct nvme_controller *ctrlr;
94 ctrlr = sim2ctrlr(sim);
95 payload = nvmeio->data_ptr;
96 size = nvmeio->dxfer_len;
98 if ((nvmeio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
99 req = nvme_allocate_request_bio((struct bio *)payload,
100 nvme_sim_nvmeio_done, ccb);
101 else if ((nvmeio->ccb_h.flags & CAM_DATA_SG) == CAM_DATA_SG)
102 req = nvme_allocate_request_ccb(ccb, nvme_sim_nvmeio_done, ccb);
103 else if (payload == NULL)
104 req = nvme_allocate_request_null(nvme_sim_nvmeio_done, ccb);
106 req = nvme_allocate_request_vaddr(payload, size,
107 nvme_sim_nvmeio_done, ccb);
110 nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
114 ccb->ccb_h.status |= CAM_SIM_QUEUED;
116 memcpy(&req->cmd, &ccb->nvmeio.cmd, sizeof(ccb->nvmeio.cmd));
118 if (ccb->ccb_h.func_code == XPT_NVME_IO)
119 nvme_ctrlr_submit_io_request(ctrlr, req);
121 nvme_ctrlr_submit_admin_request(ctrlr, req);
125 nvme_link_kBps(struct nvme_controller *ctrlr)
127 uint32_t speed, lanes, link[] = { 1, 250000, 500000, 985000, 1970000 };
130 status = pcie_read_config(ctrlr->dev, PCIER_LINK_STA, 2);
131 speed = status & PCIEM_LINK_STA_SPEED;
132 lanes = (status & PCIEM_LINK_STA_WIDTH) >> 4;
134 * Failsafe on link speed indicator. If it is insane report the number of
135 * lanes as the speed. Not 100% accurate, but may be diagnostic.
137 if (speed >= nitems(link))
139 return link[speed] * lanes;
143 nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
145 struct nvme_controller *ctrlr;
147 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
148 ("nvme_sim_action: func= %#x\n",
149 ccb->ccb_h.func_code));
151 ctrlr = sim2ctrlr(sim);
153 mtx_assert(&ctrlr->lock, MA_OWNED);
155 switch (ccb->ccb_h.func_code) {
156 case XPT_CALC_GEOMETRY: /* Calculate Geometry Totally nuts ? XXX */
158 * Only meaningful for old-school SCSI disks since only the SCSI
159 * da driver generates them. Reject all these that slip through.
162 case XPT_ABORT: /* Abort the specified CCB */
163 ccb->ccb_h.status = CAM_REQ_INVALID;
165 case XPT_SET_TRAN_SETTINGS:
167 * NVMe doesn't really have different transfer settings, but
168 * other parts of CAM think failure here is a big deal.
170 ccb->ccb_h.status = CAM_REQ_CMP;
172 case XPT_PATH_INQ: /* Path routing inquiry */
174 struct ccb_pathinq *cpi = &ccb->cpi;
175 device_t dev = ctrlr->dev;
178 * NVMe may have multiple LUNs on the same path. Current generation
179 * of NVMe devives support only a single name space. Multiple name
180 * space drives are coming, but it's unclear how we should report
183 cpi->version_num = 1;
184 cpi->hba_inquiry = 0;
185 cpi->target_sprt = 0;
186 cpi->hba_misc = PIM_UNMAPPED | PIM_NOSCAN;
187 cpi->hba_eng_cnt = 0;
189 cpi->max_lun = ctrlr->cdata.nn;
190 cpi->maxio = ctrlr->max_xfer_size;
191 cpi->initiator_id = 0;
192 cpi->bus_id = cam_sim_bus(sim);
193 cpi->base_transfer_speed = nvme_link_kBps(ctrlr);
194 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
195 strlcpy(cpi->hba_vid, "NVMe", HBA_IDLEN);
196 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
197 cpi->unit_number = cam_sim_unit(sim);
198 cpi->transport = XPORT_NVME; /* XXX XPORT_PCIE ? */
199 cpi->transport_version = nvme_mmio_read_4(ctrlr, vs);
200 cpi->protocol = PROTO_NVME;
201 cpi->protocol_version = nvme_mmio_read_4(ctrlr, vs);
202 cpi->xport_specific.nvme.nsid = xpt_path_lun_id(ccb->ccb_h.path);
203 cpi->xport_specific.nvme.domain = pci_get_domain(dev);
204 cpi->xport_specific.nvme.bus = pci_get_bus(dev);
205 cpi->xport_specific.nvme.slot = pci_get_slot(dev);
206 cpi->xport_specific.nvme.function = pci_get_function(dev);
207 cpi->xport_specific.nvme.extra = 0;
208 cpi->ccb_h.status = CAM_REQ_CMP;
211 case XPT_GET_TRAN_SETTINGS: /* Get transport settings */
213 struct ccb_trans_settings *cts;
214 struct ccb_trans_settings_nvme *nvmep;
215 struct ccb_trans_settings_nvme *nvmex;
217 uint32_t status, caps;
221 nvmex = &cts->xport_specific.nvme;
222 nvmep = &cts->proto_specific.nvme;
224 status = pcie_read_config(dev, PCIER_LINK_STA, 2);
225 caps = pcie_read_config(dev, PCIER_LINK_CAP, 2);
226 nvmex->valid = CTS_NVME_VALID_SPEC | CTS_NVME_VALID_LINK;
227 nvmex->spec = nvme_mmio_read_4(ctrlr, vs);
228 nvmex->speed = status & PCIEM_LINK_STA_SPEED;
229 nvmex->lanes = (status & PCIEM_LINK_STA_WIDTH) >> 4;
230 nvmex->max_speed = caps & PCIEM_LINK_CAP_MAX_SPEED;
231 nvmex->max_lanes = (caps & PCIEM_LINK_CAP_MAX_WIDTH) >> 4;
233 /* XXX these should be something else maybe ? */
235 nvmep->spec = nvmex->spec;
237 cts->transport = XPORT_NVME;
238 cts->protocol = PROTO_NVME;
239 cts->ccb_h.status = CAM_REQ_CMP;
242 case XPT_TERM_IO: /* Terminate the I/O process */
244 * every driver handles this, but nothing generates it. Assume
245 * it's OK to just say 'that worked'.
248 case XPT_RESET_DEV: /* Bus Device Reset the specified device */
249 case XPT_RESET_BUS: /* Reset the specified bus */
251 * NVMe doesn't really support physically resetting the bus. It's part
252 * of the bus scanning dance, so return sucess to tell the process to
255 ccb->ccb_h.status = CAM_REQ_CMP;
257 case XPT_NVME_IO: /* Execute the requested I/O operation */
258 case XPT_NVME_ADMIN: /* or Admin operation */
259 nvme_sim_nvmeio(sim, ccb);
260 return; /* no done */
262 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Poll entry point for CAM: drive completions without interrupts. */
static void
nvme_sim_poll(struct cam_sim *sim)
{

	nvme_ctrlr_poll(sim2ctrlr(sim));
}
276 nvme_sim_new_controller(struct nvme_controller *ctrlr)
278 struct nvme_sim_softc *sc;
279 struct cam_devq *devq;
282 max_trans = ctrlr->max_hw_pend_io;
283 devq = cam_simq_alloc(max_trans);
287 sc = malloc(sizeof(*sc), M_NVME, M_ZERO | M_WAITOK);
290 sc->s_sim = cam_sim_alloc(nvme_sim_action, nvme_sim_poll,
291 "nvme", sc, device_get_unit(ctrlr->dev),
292 &ctrlr->lock, max_trans, max_trans, devq);
293 if (sc->s_sim == NULL) {
294 printf("Failed to allocate a sim\n");
298 if (xpt_bus_register(sc->s_sim, ctrlr->dev, 0) != CAM_SUCCESS) {
299 printf("Failed to create a bus\n");
302 if (xpt_create_path(&sc->s_path, /*periph*/NULL, cam_sim_path(sc->s_sim),
303 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
304 printf("Failed to create a path\n");
311 xpt_bus_deregister(cam_sim_path(sc->s_sim));
313 cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
320 nvme_sim_new_ns(struct nvme_namespace *ns, void *sc_arg)
322 struct nvme_sim_softc *sc = sc_arg;
323 struct nvme_controller *ctrlr = sc->s_ctrlr;
326 mtx_lock(&ctrlr->lock);
328 ccb = xpt_alloc_ccb_nowait();
330 printf("unable to alloc CCB for rescan\n");
334 if (xpt_create_path(&ccb->ccb_h.path, /*periph*/NULL,
335 cam_sim_path(sc->s_sim), 0, ns->id) != CAM_REQ_CMP) {
336 printf("unable to create path for rescan\n");
343 mtx_unlock(&ctrlr->lock);
349 nvme_sim_controller_fail(void *ctrlr_arg)
351 struct nvme_sim_softc *sc = ctrlr_arg;
352 struct nvme_controller *ctrlr = sc->s_ctrlr;
354 mtx_lock(&ctrlr->lock);
355 xpt_async(AC_LOST_DEVICE, sc->s_path, NULL);
356 xpt_free_path(sc->s_path);
357 xpt_bus_deregister(cam_sim_path(sc->s_sim));
358 cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
359 mtx_unlock(&ctrlr->lock);
363 struct nvme_consumer *consumer_cookie;
371 consumer_cookie = nvme_register_consumer(nvme_sim_new_ns,
372 nvme_sim_new_controller, NULL, nvme_sim_controller_fail);
375 SYSINIT(nvme_sim_register, SI_SUB_DRIVERS, SI_ORDER_ANY,
376 nvme_sim_init, NULL);
379 nvme_sim_uninit(void)
385 nvme_unregister_consumer(consumer_cookie);
388 SYSUNINIT(nvme_sim_unregister, SI_SUB_DRIVERS, SI_ORDER_ANY,
389 nvme_sim_uninit, NULL);