1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2012-2014 Intel Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/conf.h>
35 #include <sys/proc.h>
36
37 #include <dev/pci/pcivar.h>
38
39 #include "nvme_private.h"
40
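/*
 * error_print_t controls how much gets logged when a command completes with
 * an error: nothing, only commands that will not be retried, or every error.
 */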
41 typedef enum error_print { ERROR_PRINT_NONE, ERROR_PRINT_NO_RETRY, ERROR_PRINT_ALL } error_print_t;
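/*
 * Passed as the "dnr" argument when manually completing a tracker; it sets
 * the Do Not Retry bit in the synthesized status so the command is not
 * resubmitted.
 */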
42 #define DO_NOT_RETRY    1
43
44 static void     _nvme_qpair_submit_request(struct nvme_qpair *qpair,
45                                            struct nvme_request *req);
46 static void     nvme_qpair_destroy(struct nvme_qpair *qpair);
47
48 struct nvme_opcode_string {
49
50         uint16_t        opc;
51         const char *    str;
52 };
53
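/*
 * Tables mapping opcodes to human-readable names for error logging.  The
 * 0xFFFF sentinel terminates each table and supplies the fallback string
 * for opcodes that are not listed.
 */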
54 static struct nvme_opcode_string admin_opcode[] = {
55         { NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
56         { NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
57         { NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
58         { NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
59         { NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
60         { NVME_OPC_IDENTIFY, "IDENTIFY" },
61         { NVME_OPC_ABORT, "ABORT" },
62         { NVME_OPC_SET_FEATURES, "SET FEATURES" },
63         { NVME_OPC_GET_FEATURES, "GET FEATURES" },
64         { NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
65         { NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" },
66         { NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
67         { NVME_OPC_DEVICE_SELF_TEST, "DEVICE SELF-TEST" },
68         { NVME_OPC_NAMESPACE_ATTACHMENT, "NAMESPACE ATTACHMENT" },
69         { NVME_OPC_KEEP_ALIVE, "KEEP ALIVE" },
70         { NVME_OPC_DIRECTIVE_SEND, "DIRECTIVE SEND" },
71         { NVME_OPC_DIRECTIVE_RECEIVE, "DIRECTIVE RECEIVE" },
72         { NVME_OPC_VIRTUALIZATION_MANAGEMENT, "VIRTUALIZATION MANAGEMENT" },
73         { NVME_OPC_NVME_MI_SEND, "NVME-MI SEND" },
74         { NVME_OPC_NVME_MI_RECEIVE, "NVME-MI RECEIVE" },
75         { NVME_OPC_DOORBELL_BUFFER_CONFIG, "DOORBELL BUFFER CONFIG" },
76         { NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
77         { NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
78         { NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
79         { NVME_OPC_SANITIZE, "SANITIZE" },
80         { 0xFFFF, "ADMIN COMMAND" }
81 };
82
83 static struct nvme_opcode_string io_opcode[] = {
84         { NVME_OPC_FLUSH, "FLUSH" },
85         { NVME_OPC_WRITE, "WRITE" },
86         { NVME_OPC_READ, "READ" },
87         { NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
88         { NVME_OPC_COMPARE, "COMPARE" },
89         { NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" },
90         { NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
91         { NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" },
92         { NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" },
93         { NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" },
94         { NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" },
95         { 0xFFFF, "IO COMMAND" }
96 };
97
98 static const char *
99 get_admin_opcode_string(uint16_t opc)
100 {
101         struct nvme_opcode_string *entry;
102
103         entry = admin_opcode;
104
105         while (entry->opc != 0xFFFF) {
106                 if (entry->opc == opc)
107                         return (entry->str);
108                 entry++;
109         }
110         return (entry->str);
111 }
112
113 static const char *
114 get_io_opcode_string(uint16_t opc)
115 {
116         struct nvme_opcode_string *entry;
117
118         entry = io_opcode;
119
120         while (entry->opc != 0xFFFF) {
121                 if (entry->opc == opc)
122                         return (entry->str);
123                 entry++;
124         }
125         return (entry->str);
126 }
127
128
129 static void
130 nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
131     struct nvme_command *cmd)
132 {
133
134         nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
135             "cdw10:%08x cdw11:%08x\n",
136             get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
137             le32toh(cmd->nsid), le32toh(cmd->cdw10), le32toh(cmd->cdw11));
138 }
139
140 static void
141 nvme_io_qpair_print_command(struct nvme_qpair *qpair,
142     struct nvme_command *cmd)
143 {
144
145         switch (cmd->opc) {
146         case NVME_OPC_WRITE:
147         case NVME_OPC_READ:
148         case NVME_OPC_WRITE_UNCORRECTABLE:
149         case NVME_OPC_COMPARE:
150         case NVME_OPC_WRITE_ZEROES:
151                 nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
152                     "lba:%llu len:%d\n",
153                     get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid),
154                     ((unsigned long long)le32toh(cmd->cdw11) << 32) + le32toh(cmd->cdw10),
155                     (le32toh(cmd->cdw12) & 0xFFFF) + 1);
156                 break;
157         case NVME_OPC_FLUSH:
158         case NVME_OPC_DATASET_MANAGEMENT:
159         case NVME_OPC_RESERVATION_REGISTER:
160         case NVME_OPC_RESERVATION_REPORT:
161         case NVME_OPC_RESERVATION_ACQUIRE:
162         case NVME_OPC_RESERVATION_RELEASE:
163                 nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
164                     get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid));
165                 break;
166         default:
167                 nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
168                     get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
169                     cmd->cid, le32toh(cmd->nsid));
170                 break;
171         }
172 }
173
174 static void
175 nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
176 {
177         if (qpair->id == 0)
178                 nvme_admin_qpair_print_command(qpair, cmd);
179         else
180                 nvme_io_qpair_print_command(qpair, cmd);
181         if (nvme_verbose_cmd_dump) {
182                 nvme_printf(qpair->ctrlr,
183                     "nsid:%#x rsvd2:%#x rsvd3:%#x mptr:%#jx prp1:%#jx prp2:%#jx\n",
184                     cmd->nsid, cmd->rsvd2, cmd->rsvd3, (uintmax_t)cmd->mptr,
185                     (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2);
186                 nvme_printf(qpair->ctrlr,
187                     "cdw10: %#x cdw11:%#x cdw12:%#x cdw13:%#x cdw14:%#x cdw15:%#x\n",
188                     cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
189                     cmd->cdw15);
190         }
191 }
192
193 struct nvme_status_string {
194
195         uint16_t        sc;
196         const char *    str;
197 };
198
199 static struct nvme_status_string generic_status[] = {
200         { NVME_SC_SUCCESS, "SUCCESS" },
201         { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
202         { NVME_SC_INVALID_FIELD, "INVALID FIELD" },
203         { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
204         { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
205         { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
206         { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
207         { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
208         { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
209         { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
210         { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
211         { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
212         { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
213         { NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" },
214         { NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" },
215         { NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" },
216         { NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" },
217         { NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" },
218         { NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" },
219         { NVME_SC_PRP_OFFET_INVALID, "PRP OFFSET INVALID" },
220         { NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" },
221         { NVME_SC_OPERATION_DENIED, "OPERATION DENIED" },
222         { NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" },
223         { NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" },
224         { NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" },
225         { NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" },
226         { NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" },
227         { NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" },
228         { NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" },
229         { NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL DATA BLOCK GRANULARITY INVALID" },
230         { NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" },
231
232         { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
233         { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
234         { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
235         { NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" },
236         { NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" },
237         { 0xFFFF, "GENERIC" }
238 };
239
240 static struct nvme_status_string command_specific_status[] = {
241         { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
242         { NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
243         { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
244         { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
245         { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
246         { NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
247         { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
248         { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
249         { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
250         { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
251         { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
252         { NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" },
253         { NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" },
254         { NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" },
255         { NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" },
256         { NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" },
257         { NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" },
258         { NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" },
259         { NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" },
260         { NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" },
261         { NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" },
262         { NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" },
263         { NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" },
264         { NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" },
265         { NVME_SC_NS_NOT_ATTACHED, "NAMESPACE NOT ATTACHED" },
266         { NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" },
267         { NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" },
268         { NVME_SC_SELT_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" },
269         { NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" },
270         { NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" },
271         { NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" },
272         { NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" },
273         { NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" },
274
275         { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
276         { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
277         { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
278         { 0xFFFF, "COMMAND SPECIFIC" }
279 };
280
281 static struct nvme_status_string media_error_status[] = {
282         { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
283         { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
284         { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
285         { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
286         { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
287         { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
288         { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
289         { NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" },
290         { 0xFFFF, "MEDIA ERROR" }
291 };
292
293 static const char *
294 get_status_string(uint16_t sct, uint16_t sc)
295 {
296         struct nvme_status_string *entry;
297
298         switch (sct) {
299         case NVME_SCT_GENERIC:
300                 entry = generic_status;
301                 break;
302         case NVME_SCT_COMMAND_SPECIFIC:
303                 entry = command_specific_status;
304                 break;
305         case NVME_SCT_MEDIA_ERROR:
306                 entry = media_error_status;
307                 break;
308         case NVME_SCT_VENDOR_SPECIFIC:
309                 return ("VENDOR SPECIFIC");
310         default:
311                 return ("RESERVED");
312         }
313
314         while (entry->sc != 0xFFFF) {
315                 if (entry->sc == sc)
316                         return (entry->str);
317                 entry++;
318         }
319         return (entry->str);
320 }
321
322 static void
323 nvme_qpair_print_completion(struct nvme_qpair *qpair,
324     struct nvme_completion *cpl)
325 {
326         uint16_t sct, sc;
327
328         sct = NVME_STATUS_GET_SCT(cpl->status);
329         sc = NVME_STATUS_GET_SC(cpl->status);
330
331         nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n",
332             get_status_string(sct, sc), sct, sc, cpl->sqid, cpl->cid,
333             cpl->cdw0);
334 }
335
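/*
 * Decide whether a failed command should be resubmitted.  Only certain
 * generic status codes are retried, and only when the Do Not Retry bit is
 * clear.
 */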
336 static boolean_t
337 nvme_completion_is_retry(const struct nvme_completion *cpl)
338 {
339         uint8_t sct, sc, dnr;
340
341         sct = NVME_STATUS_GET_SCT(cpl->status);
342         sc = NVME_STATUS_GET_SC(cpl->status);
343         dnr = NVME_STATUS_GET_DNR(cpl->status); /* Do Not Retry Bit */
344
345         /*
346          * TODO: spec is not clear how commands that are aborted due
347          *  to TLER will be marked.  So for now, it seems
348          *  NAMESPACE_NOT_READY is the only case where we should
349          *  look at the DNR bit. Requests failed with ABORTED_BY_REQUEST
350          *  set the DNR bit correctly since the driver controls that.
351          */
352         switch (sct) {
353         case NVME_SCT_GENERIC:
354                 switch (sc) {
355                 case NVME_SC_ABORTED_BY_REQUEST:
356                 case NVME_SC_NAMESPACE_NOT_READY:
357                         if (dnr)
358                                 return (0);
359                         else
360                                 return (1);
361                 case NVME_SC_INVALID_OPCODE:
362                 case NVME_SC_INVALID_FIELD:
363                 case NVME_SC_COMMAND_ID_CONFLICT:
364                 case NVME_SC_DATA_TRANSFER_ERROR:
365                 case NVME_SC_ABORTED_POWER_LOSS:
366                 case NVME_SC_INTERNAL_DEVICE_ERROR:
367                 case NVME_SC_ABORTED_SQ_DELETION:
368                 case NVME_SC_ABORTED_FAILED_FUSED:
369                 case NVME_SC_ABORTED_MISSING_FUSED:
370                 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
371                 case NVME_SC_COMMAND_SEQUENCE_ERROR:
372                 case NVME_SC_LBA_OUT_OF_RANGE:
373                 case NVME_SC_CAPACITY_EXCEEDED:
374                 default:
375                         return (0);
376                 }
377         case NVME_SCT_COMMAND_SPECIFIC:
378         case NVME_SCT_MEDIA_ERROR:
379         case NVME_SCT_VENDOR_SPECIFIC:
380         default:
381                 return (0);
382         }
383 }
384
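/*
 * Handle a completed tracker: optionally log the error, invoke the request
 * callback unless the command will be retried, then either resubmit the
 * tracker (retry) or release it and start the next queued request.
 */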
385 static void
386 nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
387     struct nvme_completion *cpl, error_print_t print_on_error)
388 {
389         struct nvme_request     *req;
390         boolean_t               retry, error;
391
392         req = tr->req;
393         error = nvme_completion_is_error(cpl);
394         retry = error && nvme_completion_is_retry(cpl) &&
395            req->retries < nvme_retry_count;
396         if (retry)
397                 qpair->num_retries++;
398
399         if (error && (print_on_error == ERROR_PRINT_ALL ||
400                 (!retry && print_on_error == ERROR_PRINT_NO_RETRY))) {
401                 nvme_qpair_print_command(qpair, &req->cmd);
402                 nvme_qpair_print_completion(qpair, cpl);
403         }
404
405         qpair->act_tr[cpl->cid] = NULL;
406
407         KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));
408
409         if (req->cb_fn && !retry)
410                 req->cb_fn(req->cb_arg, cpl);
411
412         mtx_lock(&qpair->lock);
413         callout_stop(&tr->timer);
414
415         if (retry) {
416                 req->retries++;
417                 nvme_qpair_submit_tracker(qpair, tr);
418         } else {
419                 if (req->type != NVME_REQUEST_NULL) {
420                         bus_dmamap_sync(qpair->dma_tag_payload,
421                             tr->payload_dma_map,
422                             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
423                         bus_dmamap_unload(qpair->dma_tag_payload,
424                             tr->payload_dma_map);
425                 }
426
427                 nvme_free_request(req);
428                 tr->req = NULL;
429
430                 TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
431                 TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
432
433                 /*
434                  * If the controller is in the middle of resetting, don't
435                  *  try to submit queued requests here - let the reset logic
436                  *  handle that instead.
437                  */
438                 if (!STAILQ_EMPTY(&qpair->queued_req) &&
439                     !qpair->ctrlr->is_resetting) {
440                         req = STAILQ_FIRST(&qpair->queued_req);
441                         STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
442                         _nvme_qpair_submit_request(qpair, req);
443                 }
444         }
445
446         mtx_unlock(&qpair->lock);
447 }
448
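/*
 * Synthesize a completion entry with the given status and feed it through
 * the normal tracker completion path.
 */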
449 static void
450 nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair,
451     struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
452     error_print_t print_on_error)
453 {
454         struct nvme_completion  cpl;
455
456         memset(&cpl, 0, sizeof(cpl));
457         cpl.sqid = qpair->id;
458         cpl.cid = tr->cid;
459         cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
460         cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
461         cpl.status |= (dnr & NVME_STATUS_DNR_MASK) << NVME_STATUS_DNR_SHIFT;
462         nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
463 }
464
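/*
 * Complete a request that has no tracker associated with it (e.g. one still
 * sitting on the queued_req list): print the command on error, invoke the
 * callback directly and free the request.
 */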
465 void
466 nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
467     struct nvme_request *req, uint32_t sct, uint32_t sc)
468 {
469         struct nvme_completion  cpl;
470         boolean_t               error;
471
472         memset(&cpl, 0, sizeof(cpl));
473         cpl.sqid = qpair->id;
474         cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
475         cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
476
477         error = nvme_completion_is_error(&cpl);
478
479         if (error) {
480                 nvme_qpair_print_command(qpair, &req->cmd);
481                 nvme_qpair_print_completion(qpair, &cpl);
482         }
483
484         if (req->cb_fn)
485                 req->cb_fn(req->cb_arg, &cpl);
486
487         nvme_free_request(req);
488 }
489
490 bool
491 nvme_qpair_process_completions(struct nvme_qpair *qpair)
492 {
493         struct nvme_tracker     *tr;
494         struct nvme_completion  cpl;
495         int done = 0;
496         bool in_panic = dumping || SCHEDULER_STOPPED();
497
498         qpair->num_intr_handler_calls++;
499
500         /*
501          * qpair is not enabled, likely because a controller reset is in
502          * progress.  Ignore the interrupt - any I/O that was associated with
503          * this interrupt will get retried when the reset is complete.
504          */
505         if (!qpair->is_enabled)
506                 return (false);
507
508         /*
509          * A panic can stop the CPU this routine is running on at any point.  If
510          * we're called during a panic, complete the cq_head wrap protocol for
511          * the case where we are interrupted just after the increment at 1
512          * below, but before we can reset cq_head to zero at 2. Also cope with
513          * the case where we do the zero at 2, but may or may not have done the
514          * phase adjustment at step 3. The panic machinery flushes all pending
515          * memory writes, so we can make these strong ordering assumptions
516          * that would otherwise be unwise if we were racing in real time.
517          */
518         if (__predict_false(in_panic)) {
519                 if (qpair->cq_head == qpair->num_entries) {
520                         /*
521                          * cq_head can only equal num_entries here if we stopped
522                          * after the increment at 1 below but before the atomic store
523                          * at 2, so do both the zeroing and the phase flip now.
524                          */
525                         qpair->cq_head = 0;
526                         qpair->phase = !qpair->phase;
527                 } else if (qpair->cq_head == 0) {
528                         /*
529                          * In this case, we know that the assignment at 2
530                          * happened below, but we don't know whether 3 happened or
531                          * not. To recover, we look at the last completion
532                          * entry and set the phase to the opposite of the phase
533                          * it carries. This gets us back in sync.
534                          */
535                         cpl = qpair->cpl[qpair->num_entries - 1];
536                         nvme_completion_swapbytes(&cpl);
537                         qpair->phase = !NVME_STATUS_GET_P(cpl.status);
538                 }
539         }
540
541         bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
542             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
543         while (1) {
544                 cpl = qpair->cpl[qpair->cq_head];
545
546                 /* Convert to host endian */
547                 nvme_completion_swapbytes(&cpl);
548
549                 if (NVME_STATUS_GET_P(cpl.status) != qpair->phase)
550                         break;
551
552                 tr = qpair->act_tr[cpl.cid];
553
554                 if (tr != NULL) {
555                         nvme_qpair_complete_tracker(qpair, tr, &cpl, ERROR_PRINT_ALL);
556                         qpair->sq_head = cpl.sqhd;
557                         done++;
558                 } else if (!in_panic) {
559                         /*
560                          * A missing tracker is normally an error.  However, a
561                          * panic can stop the CPU this routine is running on
562                          * after completing an I/O but before updating
563                          * qpair->cq_head at 1 below.  Later, we re-enter this
564                          * routine to poll I/O associated with the kernel
565                          * dump. The tracker is set to NULL before its completion
566                          * routine is called, so if that routine hadn't finished (or
567                          * itself triggered the panic), then '1' below won't have
568                          * updated cq_head. Rather than panic again,
569                          * ignore this condition because it's not unexpected.
570                          */
571                         nvme_printf(qpair->ctrlr,
572                             "cpl does not map to outstanding cmd\n");
573                         /* nvme_dump_completion expects device endianness */
574                         nvme_dump_completion(&qpair->cpl[qpair->cq_head]);
575                         KASSERT(0, ("received completion for unknown cmd"));
576                 }
577
578                 /*
579                  * There are a number of races with the following (see above) when
580                  * the system panics. We compensate for each one of them by
581                  * using the atomic store to force strong ordering (at least when
582                  * viewed in the aftermath of a panic).
583                  */
584                 if (++qpair->cq_head == qpair->num_entries) {           /* 1 */
585                         atomic_store_rel_int(&qpair->cq_head, 0);       /* 2 */
586                         qpair->phase = !qpair->phase;                   /* 3 */
587                 }
588
589                 nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].cq_hdbl,
590                     qpair->cq_head);
591         }
592         return (done != 0);
593 }
594
595 static void
596 nvme_qpair_msix_handler(void *arg)
597 {
598         struct nvme_qpair *qpair = arg;
599
600         nvme_qpair_process_completions(qpair);
601 }
602
603 int
604 nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
605     uint16_t vector, uint32_t num_entries, uint32_t num_trackers,
606     struct nvme_controller *ctrlr)
607 {
608         struct nvme_tracker     *tr;
609         size_t                  cmdsz, cplsz, prpsz, allocsz, prpmemsz;
610         uint64_t                queuemem_phys, prpmem_phys, list_phys;
611         uint8_t                 *queuemem, *prpmem, *prp_list;
612         int                     i, err;
613
614         qpair->id = id;
615         qpair->vector = vector;
616         qpair->num_entries = num_entries;
617         qpair->num_trackers = num_trackers;
618         qpair->ctrlr = ctrlr;
619
620         if (ctrlr->msix_enabled) {
621
622                 /*
623                  * MSI-X vector resource IDs start at 1, so we add one to
624                  *  the queue's vector to get the corresponding rid to use.
625                  */
626                 qpair->rid = vector + 1;
627
628                 qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
629                     &qpair->rid, RF_ACTIVE);
630                 bus_setup_intr(ctrlr->dev, qpair->res,
631                     INTR_TYPE_MISC | INTR_MPSAFE, NULL,
632                     nvme_qpair_msix_handler, qpair, &qpair->tag);
633                 if (id == 0) {
634                         bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
635                             "admin");
636                 } else {
637                         bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
638                             "io%d", id - 1);
639                 }
640         }
641
642         mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);
643
644         /* Note: NVMe PRP format is restricted to 4-byte alignment. */
645         err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
646             4, PAGE_SIZE, BUS_SPACE_MAXADDR,
647             BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE,
648             (NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0,
649             NULL, NULL, &qpair->dma_tag_payload);
650         if (err != 0) {
651                 nvme_printf(ctrlr, "payload tag create failed %d\n", err);
652                 goto out;
653         }
654
655         /*
656          * Each component must be page aligned, and individual PRP lists
657          * cannot cross a page boundary.
658          */
659         cmdsz = qpair->num_entries * sizeof(struct nvme_command);
660         cmdsz = roundup2(cmdsz, PAGE_SIZE);
661         cplsz = qpair->num_entries * sizeof(struct nvme_completion);
662         cplsz = roundup2(cplsz, PAGE_SIZE);
663         prpsz = sizeof(uint64_t) * NVME_MAX_PRP_LIST_ENTRIES;
664         prpmemsz = qpair->num_trackers * prpsz;
665         allocsz = cmdsz + cplsz + prpmemsz;
666
667         err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
668             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
669             allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
670         if (err != 0) {
671                 nvme_printf(ctrlr, "tag create failed %d\n", err);
672                 goto out;
673         }
674
675         if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
676             BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
677                 nvme_printf(ctrlr, "failed to alloc qpair memory\n");
678                 goto out;
679         }
680
681         if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
682             queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) {
683                 nvme_printf(ctrlr, "failed to load qpair memory\n");
684                 goto out;
685         }
686
687         qpair->num_cmds = 0;
688         qpair->num_intr_handler_calls = 0;
689         qpair->num_retries = 0;
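        /*
         * Carve the single DMA allocation into its three page-aligned
         * regions: submission queue entries, completion queue entries, then
         * the per-tracker PRP list memory.
         */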
690         qpair->cmd = (struct nvme_command *)queuemem;
691         qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
692         prpmem = (uint8_t *)(queuemem + cmdsz + cplsz);
693         qpair->cmd_bus_addr = queuemem_phys;
694         qpair->cpl_bus_addr = queuemem_phys + cmdsz;
695         prpmem_phys = queuemem_phys + cmdsz + cplsz;
696
697         qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl);
698         qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl);
699
700         TAILQ_INIT(&qpair->free_tr);
701         TAILQ_INIT(&qpair->outstanding_tr);
702         STAILQ_INIT(&qpair->queued_req);
703
704         list_phys = prpmem_phys;
705         prp_list = prpmem;
706         for (i = 0; i < qpair->num_trackers; i++) {
707
708                 if (list_phys + prpsz > prpmem_phys + prpmemsz) {
709                         qpair->num_trackers = i;
710                         break;
711                 }
712
713                 /*
714                  * Make sure that the PRP list for this tracker doesn't
715                  * overflow to another page.
716                  */
717                 if (trunc_page(list_phys) !=
718                     trunc_page(list_phys + prpsz - 1)) {
719                         list_phys = roundup2(list_phys, PAGE_SIZE);
720                         prp_list =
721                             (uint8_t *)roundup2((uintptr_t)prp_list, PAGE_SIZE);
722                 }
723
724                 tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_WAITOK);
725                 bus_dmamap_create(qpair->dma_tag_payload, 0,
726                     &tr->payload_dma_map);
727                 callout_init(&tr->timer, 1);
728                 tr->cid = i;
729                 tr->qpair = qpair;
730                 tr->prp = (uint64_t *)prp_list;
731                 tr->prp_bus_addr = list_phys;
732                 TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
733                 list_phys += prpsz;
734                 prp_list += prpsz;
735         }
736
737         if (qpair->num_trackers == 0) {
738                 nvme_printf(ctrlr, "failed to allocate enough trackers\n");
739                 goto out;
740         }
741
742         qpair->act_tr = malloc(sizeof(struct nvme_tracker *) *
743             qpair->num_entries, M_NVME, M_ZERO | M_WAITOK);
744         return (0);
745
746 out:
747         nvme_qpair_destroy(qpair);
748         return (ENOMEM);
749 }
750
751 static void
752 nvme_qpair_destroy(struct nvme_qpair *qpair)
753 {
754         struct nvme_tracker     *tr;
755
756         if (qpair->tag)
757                 bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);
758
759         if (mtx_initialized(&qpair->lock))
760                 mtx_destroy(&qpair->lock);
761
762         if (qpair->res)
763                 bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
764                     rman_get_rid(qpair->res), qpair->res);
765
766         if (qpair->cmd != NULL) {
767                 bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
768                 bus_dmamem_free(qpair->dma_tag, qpair->cmd,
769                     qpair->queuemem_map);
770         }
771
772         if (qpair->act_tr)
773                 free(qpair->act_tr, M_NVME);
774
775         while (!TAILQ_EMPTY(&qpair->free_tr)) {
776                 tr = TAILQ_FIRST(&qpair->free_tr);
777                 TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
778                 bus_dmamap_destroy(qpair->dma_tag_payload,
779                     tr->payload_dma_map);
780                 free(tr, M_NVME);
781         }
782
783         if (qpair->dma_tag)
784                 bus_dma_tag_destroy(qpair->dma_tag);
785
786         if (qpair->dma_tag_payload)
787                 bus_dma_tag_destroy(qpair->dma_tag_payload);
788 }
789
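/*
 * Abort any outstanding Asynchronous Event Request commands.  Completing a
 * tracker can modify the outstanding list, so the scan restarts from the
 * head after each manual completion.
 */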
790 static void
791 nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
792 {
793         struct nvme_tracker     *tr;
794
795         tr = TAILQ_FIRST(&qpair->outstanding_tr);
796         while (tr != NULL) {
797                 if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
798                         nvme_qpair_manual_complete_tracker(qpair, tr,
799                             NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
800                             ERROR_PRINT_NONE);
801                         tr = TAILQ_FIRST(&qpair->outstanding_tr);
802                 } else {
803                         tr = TAILQ_NEXT(tr, tailq);
804                 }
805         }
806 }
807
808 void
809 nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
810 {
811
812         nvme_admin_qpair_abort_aers(qpair);
813         nvme_qpair_destroy(qpair);
814 }
815
816 void
817 nvme_io_qpair_destroy(struct nvme_qpair *qpair)
818 {
819
820         nvme_qpair_destroy(qpair);
821 }
822
823 static void
824 nvme_abort_complete(void *arg, const struct nvme_completion *status)
825 {
826         struct nvme_tracker     *tr = arg;
827
828         /*
829          * If cdw0 == 1, the controller was not able to abort the command
830          *  we requested.  We still need to check the active tracker array,
831          *  to cover the race where the I/O timed out at the same time the
832          *  controller was completing it.
833          */
834         if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
835                 /*
836                  * An I/O has timed out, and the controller was unable to
837                  *  abort it for some reason.  Construct a fake completion
838                  *  status, and then complete the I/O's tracker manually.
839                  */
840                 nvme_printf(tr->qpair->ctrlr,
841                     "abort command failed, aborting command manually\n");
842                 nvme_qpair_manual_complete_tracker(tr->qpair, tr,
843                     NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_ALL);
844         }
845 }
846
847 static void
848 nvme_timeout(void *arg)
849 {
850         struct nvme_tracker     *tr = arg;
851         struct nvme_qpair       *qpair = tr->qpair;
852         struct nvme_controller  *ctrlr = qpair->ctrlr;
853         uint32_t                csts;
854         uint8_t                 cfs;
855
856         /*
857          * Read csts to get value of cfs - controller fatal status.
858          * If no fatal status, try to call the completion routine, and
859          * if it completes any transactions, report a missed interrupt and
860          * return (this may need to be rate limited). Otherwise, if
861          * aborts are enabled and the controller is not reporting
862          * fatal status, abort the command. Otherwise, just reset the
863          * controller and hope for the best.
864          */
865         csts = nvme_mmio_read_4(ctrlr, csts);
866         cfs = (csts >> NVME_CSTS_REG_CFS_SHIFT) & NVME_CSTS_REG_CFS_MASK;
867         if (cfs == 0 && nvme_qpair_process_completions(qpair)) {
868                 nvme_printf(ctrlr, "Missing interrupt\n");
869                 return;
870         }
871         if (ctrlr->enable_aborts && cfs == 0) {
872                 nvme_printf(ctrlr, "Aborting command due to a timeout.\n");
873                 nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id,
874                     nvme_abort_complete, tr);
875         } else {
876                 nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n",
877                     cfs ? " and fatal error status" : "");
878                 nvme_ctrlr_reset(ctrlr);
879         }
880 }
881
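/*
 * Copy the tracker's command into the submission queue, start the timeout
 * callout if the request asked for one, and ring the submission queue
 * doorbell.  Called with the qpair lock held.
 */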
882 void
883 nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
884 {
885         struct nvme_request     *req;
886         struct nvme_controller  *ctrlr;
887
888         mtx_assert(&qpair->lock, MA_OWNED);
889
890         req = tr->req;
891         req->cmd.cid = tr->cid;
892         qpair->act_tr[tr->cid] = tr;
893         ctrlr = qpair->ctrlr;
894
895         if (req->timeout)
896                 callout_reset_curcpu(&tr->timer, ctrlr->timeout_period * hz,
897                     nvme_timeout, tr);
898
899         /* Copy the command from the tracker to the submission queue. */
900         memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));
901
902         if (++qpair->sq_tail == qpair->num_entries)
903                 qpair->sq_tail = 0;
904
905         bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
906             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
907 #ifndef __powerpc__
908         /*
909          * powerpc's bus_dmamap_sync() already includes a heavyweight sync, but
910          * no other archs do.
911          */
912         wmb();
913 #endif
914
915         nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].sq_tdbl,
916             qpair->sq_tail);
917
918         qpair->num_cmds++;
919 }
920
921 static void
922 nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
923 {
924         struct nvme_tracker     *tr = arg;
925         uint32_t                cur_nseg;
926
927         /*
928          * If the mapping operation failed, return immediately.  The caller
929          *  is responsible for detecting the error status and failing the
930          *  tracker manually.
931          */
932         if (error != 0) {
933                 nvme_printf(tr->qpair->ctrlr,
934                     "nvme_payload_map err %d\n", error);
935                 return;
936         }
937
938         /*
939          * Note that we specified PAGE_SIZE for alignment and max
940          *  segment size when creating the bus dma tags.  So here
941          *  we can safely just transfer each segment to its
942          *  associated PRP entry.
943          */
944         tr->req->cmd.prp1 = htole64(seg[0].ds_addr);
945
946         if (nseg == 2) {
947                 tr->req->cmd.prp2 = htole64(seg[1].ds_addr);
948         } else if (nseg > 2) {
949                 cur_nseg = 1;
950                 tr->req->cmd.prp2 = htole64((uint64_t)tr->prp_bus_addr);
951                 while (cur_nseg < nseg) {
952                         tr->prp[cur_nseg-1] =
953                             htole64((uint64_t)seg[cur_nseg].ds_addr);
954                         cur_nseg++;
955                 }
956         } else {
957                 /*
958                  * prp2 should not be used by the controller
959                  *  since there is only one segment, but set
960                  *  to 0 just to be safe.
961                  */
962                 tr->req->cmd.prp2 = 0;
963         }
964
965         bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map,
966             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
967         nvme_qpair_submit_tracker(tr->qpair, tr);
968 }
969
970 static void
971 _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
972 {
973         struct nvme_tracker     *tr;
974         int                     err = 0;
975
976         mtx_assert(&qpair->lock, MA_OWNED);
977
978         tr = TAILQ_FIRST(&qpair->free_tr);
979         req->qpair = qpair;
980
981         if (tr == NULL || !qpair->is_enabled) {
982                 /*
983                  * No tracker is available, or the qpair is disabled due to
984                  *  an in-progress controller-level reset or controller
985                  *  failure.
986                  */
987
988                 if (qpair->ctrlr->is_failed) {
989                         /*
990                          * The controller has failed.  Post the request to a
991                          *  task where it will be aborted, so that we do not
992                          *  invoke the request's callback in the context
993                          *  of the submission.
994                          */
995                         nvme_ctrlr_post_failed_request(qpair->ctrlr, req);
996                 } else {
997                         /*
998                          * Put the request on the qpair's request queue to be
999                          *  processed when a tracker frees up via a command
1000                          *  completion or when the controller reset is
1001                          *  completed.
1002                          */
1003                         STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
1004                 }
1005                 return;
1006         }
1007
1008         TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
1009         TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
1010         tr->req = req;
1011
1012         switch (req->type) {
1013         case NVME_REQUEST_VADDR:
1014                 KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size,
1015                     ("payload_size (%d) exceeds max_xfer_size (%d)\n",
1016                     req->payload_size, qpair->ctrlr->max_xfer_size));
1017                 err = bus_dmamap_load(tr->qpair->dma_tag_payload,
1018                     tr->payload_dma_map, req->u.payload, req->payload_size,
1019                     nvme_payload_map, tr, 0);
1020                 if (err != 0)
1021                         nvme_printf(qpair->ctrlr,
1022                             "bus_dmamap_load returned 0x%x!\n", err);
1023                 break;
1024         case NVME_REQUEST_NULL:
1025                 nvme_qpair_submit_tracker(tr->qpair, tr);
1026                 break;
1027         case NVME_REQUEST_BIO:
1028                 KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size,
1029                     ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
1030                     (intmax_t)req->u.bio->bio_bcount,
1031                     qpair->ctrlr->max_xfer_size));
1032                 err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload,
1033                     tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
1034                 if (err != 0)
1035                         nvme_printf(qpair->ctrlr,
1036                             "bus_dmamap_load_bio returned 0x%x!\n", err);
1037                 break;
1038         case NVME_REQUEST_CCB:
1039                 err = bus_dmamap_load_ccb(tr->qpair->dma_tag_payload,
1040                     tr->payload_dma_map, req->u.payload,
1041                     nvme_payload_map, tr, 0);
1042                 if (err != 0)
1043                         nvme_printf(qpair->ctrlr,
1044                             "bus_dmamap_load_ccb returned 0x%x!\n", err);
1045                 break;
1046         default:
1047                 panic("unknown nvme request type 0x%x\n", req->type);
1048                 break;
1049         }
1050
1051         if (err != 0) {
1052                 /*
1053                  * The dmamap operation failed, so we manually fail the
1054                  *  tracker here with DATA_TRANSFER_ERROR status.
1055                  *
1056                  * nvme_qpair_manual_complete_tracker must not be called
1057                  *  with the qpair lock held.
1058                  */
1059                 mtx_unlock(&qpair->lock);
1060                 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
1061                     NVME_SC_DATA_TRANSFER_ERROR, DO_NOT_RETRY, ERROR_PRINT_ALL);
1062                 mtx_lock(&qpair->lock);
1063         }
1064 }
1065
1066 void
1067 nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
1068 {
1069
1070         mtx_lock(&qpair->lock);
1071         _nvme_qpair_submit_request(qpair, req);
1072         mtx_unlock(&qpair->lock);
1073 }
1074
1075 static void
1076 nvme_qpair_enable(struct nvme_qpair *qpair)
1077 {
1078
1079         qpair->is_enabled = TRUE;
1080 }
1081
1082 void
1083 nvme_qpair_reset(struct nvme_qpair *qpair)
1084 {
1085
1086         qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;
1087
1088         /*
1089          * The first time through the completion queue, HW will set the
1090          *  phase bit on completions to 1.  So set this to 1 here, indicating
1091          *  we're looking for a 1 to know which entries have completed.
1092          *  We'll toggle the bit each time the completion queue
1093          *  rolls over.
1094          */
1095         qpair->phase = 1;
1096
1097         memset(qpair->cmd, 0,
1098             qpair->num_entries * sizeof(struct nvme_command));
1099         memset(qpair->cpl, 0,
1100             qpair->num_entries * sizeof(struct nvme_completion));
1101 }
1102
1103 void
1104 nvme_admin_qpair_enable(struct nvme_qpair *qpair)
1105 {
1106         struct nvme_tracker             *tr;
1107         struct nvme_tracker             *tr_temp;
1108
1109         /*
1110          * Manually abort each outstanding admin command.  Do not retry
1111          *  admin commands found here, since they will be left over from
1112          *  a controller reset and it's likely the context in which the
1113          *  command was issued no longer applies.
1114          */
1115         TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
1116                 nvme_printf(qpair->ctrlr,
1117                     "aborting outstanding admin command\n");
1118                 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
1119                     NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
1120         }
1121
1122         nvme_qpair_enable(qpair);
1123 }
1124
1125 void
1126 nvme_io_qpair_enable(struct nvme_qpair *qpair)
1127 {
1128         STAILQ_HEAD(, nvme_request)     temp;
1129         struct nvme_tracker             *tr;
1130         struct nvme_tracker             *tr_temp;
1131         struct nvme_request             *req;
1132
1133         /*
1134          * Manually abort each outstanding I/O.  This normally results in a
1135          *  retry, unless the retry count on the associated request has
1136          *  reached its limit.
1137          */
1138         TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
1139                 nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
1140                 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
1141                     NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_NO_RETRY);
1142         }
1143
1144         mtx_lock(&qpair->lock);
1145
1146         nvme_qpair_enable(qpair);
1147
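        /*
         * Move any requests that were queued while the qpair was disabled
         * onto a local list, then resubmit each one now that the qpair is
         * enabled again.
         */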
1148         STAILQ_INIT(&temp);
1149         STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);
1150
1151         while (!STAILQ_EMPTY(&temp)) {
1152                 req = STAILQ_FIRST(&temp);
1153                 STAILQ_REMOVE_HEAD(&temp, stailq);
1154                 nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
1155                 nvme_qpair_print_command(qpair, &req->cmd);
1156                 _nvme_qpair_submit_request(qpair, req);
1157         }
1158
1159         mtx_unlock(&qpair->lock);
1160 }
1161
1162 static void
1163 nvme_qpair_disable(struct nvme_qpair *qpair)
1164 {
1165         struct nvme_tracker *tr;
1166
1167         qpair->is_enabled = FALSE;
1168         mtx_lock(&qpair->lock);
1169         TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
1170                 callout_stop(&tr->timer);
1171         mtx_unlock(&qpair->lock);
1172 }
1173
1174 void
1175 nvme_admin_qpair_disable(struct nvme_qpair *qpair)
1176 {
1177
1178         nvme_qpair_disable(qpair);
1179         nvme_admin_qpair_abort_aers(qpair);
1180 }
1181
1182 void
1183 nvme_io_qpair_disable(struct nvme_qpair *qpair)
1184 {
1185
1186         nvme_qpair_disable(qpair);
1187 }
1188
1189 void
1190 nvme_qpair_fail(struct nvme_qpair *qpair)
1191 {
1192         struct nvme_tracker             *tr;
1193         struct nvme_request             *req;
1194
1195         if (!mtx_initialized(&qpair->lock))
1196                 return;
1197
1198         mtx_lock(&qpair->lock);
1199
1200         while (!STAILQ_EMPTY(&qpair->queued_req)) {
1201                 req = STAILQ_FIRST(&qpair->queued_req);
1202                 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
1203                 nvme_printf(qpair->ctrlr, "failing queued i/o\n");
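                /*
                 * Drop the qpair lock across the manual completion, since the
                 * request's callback is invoked there and must not run with
                 * the lock held.
                 */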
1204                 mtx_unlock(&qpair->lock);
1205                 nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
1206                     NVME_SC_ABORTED_BY_REQUEST);
1207                 mtx_lock(&qpair->lock);
1208         }
1209
1210         /* Manually abort each outstanding I/O. */
1211         while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
1212                 tr = TAILQ_FIRST(&qpair->outstanding_tr);
1213                 /*
1214                  * Do not remove the tracker.  The abort_tracker path will
1215                  *  do that for us.
1216                  */
1217                 nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
1218                 mtx_unlock(&qpair->lock);
1219                 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
1220                     NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
1221                 mtx_lock(&qpair->lock);
1222         }
1223
1224         mtx_unlock(&qpair->lock);
1225 }
1226