2 * Copyright (C) 2012-2013 Intel Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <sys/types.h>
36 #include <sys/param.h>
/* ioctl commands accepted by the nvme(4) character devices. */
38 #define NVME_PASSTHROUGH_CMD _IOWR('n', 0, struct nvme_pt_command)
39 #define NVME_RESET_CONTROLLER _IO('n', 1)
/* Throughput-test hooks; argument is struct nvme_io_test (defined below). */
41 #define NVME_IO_TEST _IOWR('n', 100, struct nvme_io_test)
42 #define NVME_BIO_TEST _IOWR('n', 101, struct nvme_io_test)
45 * Use to mark a command to apply to all namespaces, or to retrieve global
48 #define NVME_GLOBAL_NAMESPACE_TAG ((uint32_t)0xFFFFFFFF)
50 /* Cap nvme to 1MB transfers; the driver explodes with larger sizes. */
51 #define NVME_MAX_XFER_SIZE (MAXPHYS < (1<<20) ? MAXPHYS : (1<<20))
/* CAP register, low 32 bits (controller capabilities).
 * NOTE(review): the extraction dropped several lines here (embedded line
 * numbers jump); the bit-field declarations for the commented members are
 * missing from this view -- verify against the original header. */
53 union cap_lo_register {
56 /** maximum queue entries supported */
59 /** contiguous queues required */
62 /** arbitration mechanism supported */
65 uint32_t reserved1 : 5;
/* CAP register, high 32 bits.
 * NOTE(review): several member declarations are missing from this
 * extraction (line-number gaps); only the reserved fields survived. */
72 union cap_hi_register {
75 /** doorbell stride */
78 uint32_t reserved3 : 1;
80 /** command sets supported */
83 uint32_t css_reserved : 3;
84 uint32_t reserved2 : 7;
86 /** memory page size minimum */
89 /** memory page size maximum */
92 uint32_t reserved1 : 8;
/* Fields of the CC (controller configuration) register.
 * NOTE(review): the enclosing union header and most member declarations
 * were lost in extraction -- only comments and reserved fields remain. */
102 uint32_t reserved1 : 3;
104 /** i/o command set selected */
107 /** memory page size */
110 /** arbitration mechanism selected */
113 /** shutdown notification */
116 /** i/o submission queue entry size */
119 /** i/o completion queue entry size */
122 uint32_t reserved2 : 8;
/* Values for the CC shutdown-notification field (enum header missing here). */
127 NVME_SHN_NORMAL = 0x1,
128 NVME_SHN_ABRUPT = 0x2,
/* CSTS (controller status) register.
 * NOTE(review): member declarations for the commented fields are missing
 * from this extraction. */
131 union csts_register {
137 /** controller fatal status */
140 /** shutdown status */
143 uint32_t reserved1 : 28;
/* Values reported in the CSTS shutdown-status field. */
148 NVME_SHST_NORMAL = 0x0,
149 NVME_SHST_OCCURRING = 0x1,
150 NVME_SHST_COMPLETE = 0x2,
/* Fields of the AQA (admin queue attributes) register; the enclosing
 * union header and the size-field declarations are missing from this view. */
156 /** admin submission queue size */
159 uint32_t reserved1 : 4;
161 /** admin completion queue size */
164 uint32_t reserved2 : 4;
/* Memory-mapped register file of an NVMe controller (BAR0 layout).
 * Doorbell pairs follow the fixed registers; doorbell[1] is a
 * variable-length-array idiom indexed per queue pair.
 * NOTE(review): the opening brace and some reserved members were lost
 * in extraction. */
168 struct nvme_registers
170 /** controller capabilities */
171 union cap_lo_register cap_lo;
172 union cap_hi_register cap_hi;
174 uint32_t vs; /* version */
175 uint32_t intms; /* interrupt mask set */
176 uint32_t intmc; /* interrupt mask clear */
178 /** controller configuration */
179 union cc_register cc;
183 /** controller status */
184 union csts_register csts;
188 /** admin queue attributes */
189 union aqa_register aqa;
191 uint64_t asq; /* admin submission queue base addr */
192 uint64_t acq; /* admin completion queue base addr */
193 uint32_t reserved3[0x3f2];
196 uint32_t sq_tdbl; /* submission queue tail doorbell */
197 uint32_t cq_hdbl; /* completion queue head doorbell */
198 } doorbell[1] __packed;
/* Fields of struct nvme_command (submission queue entry); the struct
 * header and some members (e.g. between cid and nsid) are missing from
 * this extraction. */
204 uint16_t opc : 8; /* opcode */
205 uint16_t fuse : 2; /* fused operation */
207 uint16_t cid; /* command identifier */
210 uint32_t nsid; /* namespace identifier */
217 uint64_t mptr; /* metadata pointer */
220 uint64_t prp1; /* prp entry 1 */
223 uint64_t prp2; /* prp entry 2 */
226 uint32_t cdw10; /* command-specific */
227 uint32_t cdw11; /* command-specific */
228 uint32_t cdw12; /* command-specific */
229 uint32_t cdw13; /* command-specific */
230 uint32_t cdw14; /* command-specific */
231 uint32_t cdw15; /* command-specific */
/* Fields of struct nvme_status (the 16-bit completion status word);
 * the struct header is missing from this view. */
236 uint16_t p : 1; /* phase tag */
237 uint16_t sc : 8; /* status code */
238 uint16_t sct : 3; /* status code type */
240 uint16_t m : 1; /* more */
241 uint16_t dnr : 1; /* do not retry */
/* Completion queue entry as posted by the controller. */
244 struct nvme_completion {
247 uint32_t cdw0; /* command-specific */
253 uint16_t sqhd; /* submission queue head pointer */
254 uint16_t sqid; /* submission queue identifier */
257 uint16_t cid; /* command identifier */
258 struct nvme_status status;
/* One range descriptor for the Dataset Management command; remaining
 * members (attributes/length) are missing from this extraction. */
261 struct nvme_dsm_range {
265 uint64_t starting_lba;
/* Status code types and per-type status codes, as reported in
 * nvme_status.sct / nvme_status.sc. Values 0x80 and above in each
 * enum are the command-set-specific range.
 * NOTE(review): closing braces of these enums were lost in extraction. */
268 /* status code types */
269 enum nvme_status_code_type {
270 NVME_SCT_GENERIC = 0x0,
271 NVME_SCT_COMMAND_SPECIFIC = 0x1,
272 NVME_SCT_MEDIA_ERROR = 0x2,
273 /* 0x3-0x6 - reserved */
274 NVME_SCT_VENDOR_SPECIFIC = 0x7,
277 /* generic command status codes */
278 enum nvme_generic_command_status_code {
279 NVME_SC_SUCCESS = 0x00,
280 NVME_SC_INVALID_OPCODE = 0x01,
281 NVME_SC_INVALID_FIELD = 0x02,
282 NVME_SC_COMMAND_ID_CONFLICT = 0x03,
283 NVME_SC_DATA_TRANSFER_ERROR = 0x04,
284 NVME_SC_ABORTED_POWER_LOSS = 0x05,
285 NVME_SC_INTERNAL_DEVICE_ERROR = 0x06,
286 NVME_SC_ABORTED_BY_REQUEST = 0x07,
287 NVME_SC_ABORTED_SQ_DELETION = 0x08,
288 NVME_SC_ABORTED_FAILED_FUSED = 0x09,
289 NVME_SC_ABORTED_MISSING_FUSED = 0x0a,
290 NVME_SC_INVALID_NAMESPACE_OR_FORMAT = 0x0b,
291 NVME_SC_COMMAND_SEQUENCE_ERROR = 0x0c,
293 NVME_SC_LBA_OUT_OF_RANGE = 0x80,
294 NVME_SC_CAPACITY_EXCEEDED = 0x81,
295 NVME_SC_NAMESPACE_NOT_READY = 0x82,
298 /* command specific status codes */
299 enum nvme_command_specific_status_code {
300 NVME_SC_COMPLETION_QUEUE_INVALID = 0x00,
301 NVME_SC_INVALID_QUEUE_IDENTIFIER = 0x01,
302 NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED = 0x02,
303 NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED = 0x03,
304 /* 0x04 - reserved */
305 NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05,
306 NVME_SC_INVALID_FIRMWARE_SLOT = 0x06,
307 NVME_SC_INVALID_FIRMWARE_IMAGE = 0x07,
308 NVME_SC_INVALID_INTERRUPT_VECTOR = 0x08,
309 NVME_SC_INVALID_LOG_PAGE = 0x09,
310 NVME_SC_INVALID_FORMAT = 0x0a,
311 NVME_SC_FIRMWARE_REQUIRES_RESET = 0x0b,
313 NVME_SC_CONFLICTING_ATTRIBUTES = 0x80,
314 NVME_SC_INVALID_PROTECTION_INFO = 0x81,
315 NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE = 0x82,
318 /* media error status codes */
319 enum nvme_media_error_status_code {
320 NVME_SC_WRITE_FAULTS = 0x80,
321 NVME_SC_UNRECOVERED_READ_ERROR = 0x81,
322 NVME_SC_GUARD_CHECK_ERROR = 0x82,
323 NVME_SC_APPLICATION_TAG_CHECK_ERROR = 0x83,
324 NVME_SC_REFERENCE_TAG_CHECK_ERROR = 0x84,
325 NVME_SC_COMPARE_FAILURE = 0x85,
326 NVME_SC_ACCESS_DENIED = 0x86,
/* Admin command opcodes (placed in nvme_command.opc for admin queue
 * submissions); 0x80 and above are NVM-command-set specific. */
330 enum nvme_admin_opcode {
331 NVME_OPC_DELETE_IO_SQ = 0x00,
332 NVME_OPC_CREATE_IO_SQ = 0x01,
333 NVME_OPC_GET_LOG_PAGE = 0x02,
334 /* 0x03 - reserved */
335 NVME_OPC_DELETE_IO_CQ = 0x04,
336 NVME_OPC_CREATE_IO_CQ = 0x05,
337 NVME_OPC_IDENTIFY = 0x06,
338 /* 0x07 - reserved */
339 NVME_OPC_ABORT = 0x08,
340 NVME_OPC_SET_FEATURES = 0x09,
341 NVME_OPC_GET_FEATURES = 0x0a,
342 /* 0x0b - reserved */
343 NVME_OPC_ASYNC_EVENT_REQUEST = 0x0c,
344 /* 0x0d-0x0f - reserved */
345 NVME_OPC_FIRMWARE_ACTIVATE = 0x10,
346 NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD = 0x11,
348 NVME_OPC_FORMAT_NVM = 0x80,
349 NVME_OPC_SECURITY_SEND = 0x81,
350 NVME_OPC_SECURITY_RECEIVE = 0x82,
353 /* nvme nvm opcodes */
354 enum nvme_nvm_opcode {
355 NVME_OPC_FLUSH = 0x00,
356 NVME_OPC_WRITE = 0x01,
357 NVME_OPC_READ = 0x02,
358 /* 0x03 - reserved */
359 NVME_OPC_WRITE_UNCORRECTABLE = 0x04,
360 NVME_OPC_COMPARE = 0x05,
361 /* 0x06-0x07 - reserved */
362 NVME_OPC_DATASET_MANAGEMENT = 0x09,
/* Feature identifiers for Set/Get Features (cdw10); the enum header
 * is missing from this extraction. */
366 /* 0x00 - reserved */
367 NVME_FEAT_ARBITRATION = 0x01,
368 NVME_FEAT_POWER_MANAGEMENT = 0x02,
369 NVME_FEAT_LBA_RANGE_TYPE = 0x03,
370 NVME_FEAT_TEMPERATURE_THRESHOLD = 0x04,
371 NVME_FEAT_ERROR_RECOVERY = 0x05,
372 NVME_FEAT_VOLATILE_WRITE_CACHE = 0x06,
373 NVME_FEAT_NUMBER_OF_QUEUES = 0x07,
374 NVME_FEAT_INTERRUPT_COALESCING = 0x08,
375 NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
376 NVME_FEAT_WRITE_ATOMICITY = 0x0A,
377 NVME_FEAT_ASYNC_EVENT_CONFIGURATION = 0x0B,
378 /* 0x0C-0x7F - reserved */
379 NVME_FEAT_SOFTWARE_PROGRESS_MARKER = 0x80,
380 /* 0x81-0xBF - command set specific (reserved) */
381 /* 0xC0-0xFF - vendor specific */
/* Per-range attribute bits for Dataset Management (see nvme_ns_trim_cmd). */
384 enum nvme_dsm_attribute {
385 NVME_DSM_ATTR_INTEGRAL_READ = 0x1,
386 NVME_DSM_ATTR_INTEGRAL_WRITE = 0x2,
387 NVME_DSM_ATTR_DEALLOCATE = 0x4,
/* Action field for the Firmware Activate admin command. */
390 enum nvme_activate_action {
391 NVME_AA_REPLACE_NO_ACTIVATE = 0x0,
392 NVME_AA_REPLACE_ACTIVATE = 0x1,
393 NVME_AA_ACTIVATE = 0x2,
/* One power state descriptor from the Identify Controller data
 * (see power_state[32] in struct nvme_controller_data below).
 * NOTE(review): the opening brace and a few reserved members were lost
 * in extraction. */
396 struct nvme_power_state {
398 uint16_t mp; /* Maximum Power */
400 uint8_t mps : 1; /* Max Power Scale */
401 uint8_t nops : 1; /* Non-Operational State */
402 uint8_t ps_rsvd2 : 6;
403 uint32_t enlat; /* Entry Latency */
404 uint32_t exlat; /* Exit Latency */
405 uint8_t rrt : 5; /* Relative Read Throughput */
406 uint8_t ps_rsvd3 : 3;
407 uint8_t rrl : 5; /* Relative Read Latency */
408 uint8_t ps_rsvd4 : 3;
409 uint8_t rwt : 5; /* Relative Write Throughput */
410 uint8_t ps_rsvd5 : 3;
411 uint8_t rwl : 5; /* Relative Write Latency */
412 uint8_t ps_rsvd6 : 3;
413 uint16_t idlp; /* Idle Power */
414 uint8_t ps_rsvd7 : 6;
415 uint8_t ips : 2; /* Idle Power Scale */
417 uint16_t actp; /* Active Power */
418 uint8_t apw : 3; /* Active Power Workload */
419 uint8_t ps_rsvd9 : 3;
420 uint8_t aps : 2; /* Active Power Scale */
421 uint8_t ps_rsvd10[9];
/* Byte lengths of the fixed-width identify-controller string fields
 * (sn/mn/fr in struct nvme_controller_data below). */
424 #define NVME_SERIAL_NUMBER_LENGTH 20
425 #define NVME_MODEL_NUMBER_LENGTH 40
426 #define NVME_FIRMWARE_REVISION_LENGTH 8
/* Identify Controller data structure (4096 bytes), returned by the
 * IDENTIFY admin command with CNS=1.
 * NOTE(review): many member declarations were lost in extraction
 * (embedded line numbers jump); only comments and some fields remain.
 * Compare against the original header before relying on layout. */
428 struct nvme_controller_data {
430 /* bytes 0-255: controller capabilities and features */
435 /** pci subsystem vendor id */
439 uint8_t sn[NVME_SERIAL_NUMBER_LENGTH];
442 uint8_t mn[NVME_MODEL_NUMBER_LENGTH];
444 /** firmware revision */
445 uint8_t fr[NVME_FIRMWARE_REVISION_LENGTH];
447 /** recommended arbitration burst */
450 /** ieee oui identifier */
453 /** multi-interface capabilities */
456 /** maximum data transfer size */
459 uint8_t reserved1[178];
461 /* bytes 256-511: admin command set attributes */
463 /** optional admin command support */
465 /* supports security send/receive commands */
466 uint16_t security : 1;
468 /* supports format nvm command */
471 /* supports firmware activate/download commands */
472 uint16_t firmware : 1;
474 uint16_t oacs_rsvd : 13;
477 /** abort command limit */
480 /** asynchronous event request limit */
483 /** firmware updates */
485 /* first slot is read-only */
486 uint8_t slot1_ro : 1;
488 /* number of firmware slots */
489 uint8_t num_slots : 3;
491 uint8_t frmw_rsvd : 4;
494 /** log page attributes */
496 /* per namespace smart/health log page */
497 uint8_t ns_smart : 1;
499 uint8_t lpa_rsvd : 7;
502 /** error log page entries */
505 /** number of power states supported */
508 /** admin vendor specific command configuration */
510 /* admin vendor specific commands use spec format */
511 uint8_t spec_format : 1;
513 uint8_t avscc_rsvd : 7;
516 uint8_t reserved2[247];
518 /* bytes 512-703: nvm command set attributes */
520 /** submission queue entry size */
526 /** completion queue entry size */
532 uint8_t reserved3[2];
534 /** number of namespaces */
537 /** optional nvm command support */
539 uint16_t compare : 1;
540 uint16_t write_unc : 1;
542 uint16_t reserved: 13;
545 /** fused operation support */
548 /** format nvm attributes */
551 /** volatile write cache */
554 uint8_t reserved : 7;
557 /* TODO: flesh out remaining nvm command set attributes */
558 uint8_t reserved4[178];
560 /* bytes 704-2047: i/o command set attributes */
561 uint8_t reserved5[1344];
563 /* bytes 2048-3071: power state descriptors */
564 struct nvme_power_state power_state[32];
566 /* bytes 3072-4095: vendor specific */
568 } __packed __aligned(4);
/* Identify Namespace data structure (4096 bytes), returned by the
 * IDENTIFY admin command with CNS=0.
 * NOTE(review): extraction dropped many member/brace lines. The members
 * 'extended' (embedded lines 594 and 601), 'md_start' (621 and 634), and
 * 'reserved6' (652 and 655) each appear twice below; in the complete
 * header they must belong to different nested anonymous structs/unions
 * whose braces are missing here -- verify before editing layout. */
570 struct nvme_namespace_data {
572 /** namespace size */
575 /** namespace capacity */
578 /** namespace utilization */
581 /** namespace features */
583 /** thin provisioning */
584 uint8_t thin_prov : 1;
585 uint8_t reserved1 : 7;
588 /** number of lba formats */
591 /** formatted lba size */
594 uint8_t extended : 1;
595 uint8_t reserved2 : 3;
598 /** metadata capabilities */
600 /* metadata can be transferred as part of data prp list */
601 uint8_t extended : 1;
603 /* metadata can be transferred with separate metadata pointer */
606 uint8_t reserved3 : 6;
609 /** end-to-end data protection capabilities */
611 /* protection information type 1 */
614 /* protection information type 2 */
617 /* protection information type 3 */
620 /* first eight bytes of metadata */
621 uint8_t md_start : 1;
623 /* last eight bytes of metadata */
627 /** end-to-end data protection type settings */
629 /* protection information type */
632 /* 1 == protection info transferred at start of metadata */
633 /* 0 == protection info transferred at end of metadata */
634 uint8_t md_start : 1;
636 uint8_t reserved4 : 4;
639 uint8_t reserved5[98];
641 /** lba format support */
649 /** relative performance */
652 uint32_t reserved6 : 6;
655 uint8_t reserved6[192];
657 uint8_t vendor_specific[3712];
658 } __packed __aligned(4);
/* Log page identifiers for GET_LOG_PAGE; the enum header is missing
 * from this extraction. */
662 /* 0x00 - reserved */
663 NVME_LOG_ERROR = 0x01,
664 NVME_LOG_HEALTH_INFORMATION = 0x02,
665 NVME_LOG_FIRMWARE_SLOT = 0x03,
666 /* 0x04-0x7F - reserved */
667 /* 0x80-0xBF - I/O command set specific */
668 /* 0xC0-0xFF - vendor specific */
/* One entry of the Error Information log page (NVME_LOG_ERROR).
 * NOTE(review): opening brace and some members lost in extraction. */
671 struct nvme_error_information_entry {
673 uint64_t error_count;
676 struct nvme_status status;
677 uint16_t error_location;
680 uint8_t vendor_specific;
681 uint8_t reserved[35];
682 } __packed __aligned(4);
/* Critical-warning bits from byte 0 of the SMART/health log page. */
684 union nvme_critical_warning_state {
689 uint8_t available_spare : 1;
690 uint8_t temperature : 1;
691 uint8_t device_reliability : 1;
692 uint8_t read_only : 1;
693 uint8_t volatile_memory_backup : 1;
694 uint8_t reserved : 3;
/* SMART / Health Information log page (NVME_LOG_HEALTH_INFORMATION).
 * NOTE(review): the opening brace was lost in extraction. */
698 struct nvme_health_information_page {
700 union nvme_critical_warning_state critical_warning;
702 uint16_t temperature;
703 uint8_t available_spare;
704 uint8_t available_spare_threshold;
705 uint8_t percentage_used;
707 uint8_t reserved[26];
710 * Note that the following are 128-bit values, but are
711 * defined as an array of 2 64-bit values.
713 /* Data Units Read is always in 512-byte units. */
714 uint64_t data_units_read[2];
715 /* Data Units Written is always in 512-byte units. */
716 uint64_t data_units_written[2];
717 /* For NVM command set, this includes Compare commands. */
718 uint64_t host_read_commands[2];
719 uint64_t host_write_commands[2];
720 /* Controller Busy Time is reported in minutes. */
721 uint64_t controller_busy_time[2];
722 uint64_t power_cycles[2];
723 uint64_t power_on_hours[2];
724 uint64_t unsafe_shutdowns[2];
725 uint64_t media_errors[2];
726 uint64_t num_error_info_log_entries[2];
727 uint32_t warning_temp_time;
728 uint32_t error_temp_time;
729 uint16_t temp_sensor[8];
731 uint8_t reserved2[296];
732 } __packed __aligned(4);
/* Firmware Slot Information log page (NVME_LOG_FIRMWARE_SLOT).
 * NOTE(review): opening brace and some member lines lost in extraction. */
734 struct nvme_firmware_page {
737 uint8_t slot : 3; /* slot for current FW */
738 uint8_t reserved : 5;
742 uint64_t revision[7]; /* revisions for 7 slots */
743 uint8_t reserved2[448];
744 } __packed __aligned(4);
/* Argument structure for the NVME_IO_TEST / NVME_BIO_TEST ioctls;
 * per-thread completion counts are returned in io_completed[].
 * NOTE(review): some members were lost in extraction. */
746 #define NVME_TEST_MAX_THREADS 128
748 struct nvme_io_test {
750 enum nvme_nvm_opcode opc;
752 uint32_t time; /* in seconds */
753 uint32_t num_threads;
755 uint64_t io_completed[NVME_TEST_MAX_THREADS];
/* Flags for struct nvme_io_test. */
758 enum nvme_io_test_flags {
761 * Specifies whether dev_refthread/dev_relthread should be
762 * called during NVME_BIO_TEST. Ignored for other test
765 NVME_TEST_FLAG_REFTHREAD = 0x1,
/* User<->kernel exchange structure for NVME_PASSTHROUGH_CMD. */
768 struct nvme_pt_command {
771 * cmd is used to specify a passthrough command to a controller or
774 * The following fields from cmd may be specified by the caller:
776 * * nsid (namespace id) - for admin commands only
779 * Remaining fields must be set to 0 by the caller.
781 struct nvme_command cmd;
784 * cpl returns completion status for the passthrough command
787 * The following fields will be filled out by the driver, for
788 * consumption by the caller:
790 * * status (except for phase)
792 * Remaining fields will be set to 0 by the driver.
794 struct nvme_completion cpl;
796 /* buf is the data buffer associated with this passthrough command. */
800 * len is the length of the data buffer associated with this
801 * passthrough command.
806 * is_read = 1 if the passthrough command will read data into the
807 * supplied buffer from the controller.
809 * is_read = 0 if the passthrough command will write data from the
810 * supplied buffer to the controller.
815 * driver_lock is used by the driver only. It must be set to 0
818 struct mtx * driver_lock;
/* True when a completion carries any non-success status (sc or sct
 * non-zero); 'cpl' is a pointer and is dereferenced twice, which is
 * safe because the argument is evaluated without side effects by all
 * in-tree callers -- avoid passing expressions with side effects. */
821 #define nvme_completion_is_error(cpl) \
822 ((cpl)->status.sc != 0 || (cpl)->status.sct != 0)
/* Copy up to srclen bytes from src into dst (dstlen capacity);
 * NOTE(review): presumably strvis-style sanitization of identify
 * strings for printing -- confirm against nvme implementation. */
824 void nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen);
/* Opaque driver objects; consumers hold pointers only. */
830 struct nvme_namespace;
831 struct nvme_controller;
832 struct nvme_consumer;
/* Per-command completion callback: (cb_arg, completion). */
834 typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);
/* Consumer attach/async/fail callbacks used by nvme_register_consumer(). */
836 typedef void *(*nvme_cons_ns_fn_t)(struct nvme_namespace *, void *);
837 typedef void *(*nvme_cons_ctrlr_fn_t)(struct nvme_controller *);
838 typedef void (*nvme_cons_async_fn_t)(void *, const struct nvme_completion *,
839 uint32_t, void *, uint32_t);
840 typedef void (*nvme_cons_fail_fn_t)(void *);
/* Bits returned by nvme_ns_get_flags(). */
842 enum nvme_namespace_flags {
843 NVME_NS_DEALLOCATE_SUPPORTED = 0x1,
844 NVME_NS_FLUSH_SUPPORTED = 0x2,
/* Public driver API. Async admin/I-O entry points complete via the
 * supplied nvme_cb_fn_t callback; int-returning functions use the
 * 0 == success convention.
 * NOTE(review): several prototypes are wrapped across lines and their
 * continuation lines were lost in extraction (embedded line numbers
 * jump) -- signatures below may be visually truncated. */
847 int nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
848 struct nvme_pt_command *pt,
849 uint32_t nsid, int is_user_buffer,
852 /* Admin functions */
853 void nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
854 uint8_t feature, uint32_t cdw11,
855 void *payload, uint32_t payload_size,
856 nvme_cb_fn_t cb_fn, void *cb_arg);
857 void nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
858 uint8_t feature, uint32_t cdw11,
859 void *payload, uint32_t payload_size,
860 nvme_cb_fn_t cb_fn, void *cb_arg);
861 void nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
862 uint8_t log_page, uint32_t nsid,
863 void *payload, uint32_t payload_size,
864 nvme_cb_fn_t cb_fn, void *cb_arg);
866 /* NVM I/O functions */
867 int nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
868 uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
870 int nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
871 nvme_cb_fn_t cb_fn, void *cb_arg);
872 int nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
873 uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
875 int nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
876 nvme_cb_fn_t cb_fn, void *cb_arg);
877 int nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
878 uint8_t num_ranges, nvme_cb_fn_t cb_fn,
880 int nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
882 int nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset,
885 /* Registration functions */
886 struct nvme_consumer * nvme_register_consumer(nvme_cons_ns_fn_t ns_fn,
887 nvme_cons_ctrlr_fn_t ctrlr_fn,
888 nvme_cons_async_fn_t async_fn,
889 nvme_cons_fail_fn_t fail_fn);
890 void nvme_unregister_consumer(struct nvme_consumer *consumer);
892 /* Controller helper functions */
893 device_t nvme_ctrlr_get_device(struct nvme_controller *ctrlr);
894 const struct nvme_controller_data *
895 nvme_ctrlr_get_data(struct nvme_controller *ctrlr);
897 /* Namespace helper functions */
898 uint32_t nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
899 uint32_t nvme_ns_get_sector_size(struct nvme_namespace *ns);
900 uint64_t nvme_ns_get_num_sectors(struct nvme_namespace *ns);
901 uint64_t nvme_ns_get_size(struct nvme_namespace *ns);
902 uint32_t nvme_ns_get_flags(struct nvme_namespace *ns);
903 const char * nvme_ns_get_serial_number(struct nvme_namespace *ns);
904 const char * nvme_ns_get_model_number(struct nvme_namespace *ns);
905 const struct nvme_namespace_data *
906 nvme_ns_get_data(struct nvme_namespace *ns);
907 uint32_t nvme_ns_get_stripesize(struct nvme_namespace *ns);
909 int nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
912 /* Command building helper functions -- shared with CAM */
/* Fill *cmd as a FLUSH for the given namespace.
 * NOTE(review): these helpers take nsid as uint16_t while
 * nvme_command.nsid is uint32_t, so NVME_GLOBAL_NAMESPACE_TAG
 * (0xFFFFFFFF) would be truncated if passed here -- consider widening
 * the parameter in a follow-up.
 * NOTE(review): function bodies are incomplete in this extraction
 * (braces and the nsid assignments are missing). */
914 void nvme_ns_flush_cmd(struct nvme_command *cmd, uint16_t nsid)
917 cmd->opc = NVME_OPC_FLUSH;
/* Fill *cmd as a READ or WRITE (rwcmd) of 'count' blocks at 'lba';
 * cdw12 carries a 0-based block count per the NVMe spec. */
922 void nvme_ns_rw_cmd(struct nvme_command *cmd, uint32_t rwcmd, uint16_t nsid,
923 uint64_t lba, uint32_t count)
927 cmd->cdw10 = lba & 0xffffffffu;
928 cmd->cdw11 = lba >> 32;
929 cmd->cdw12 = count-1;
/* Convenience wrappers around nvme_ns_rw_cmd(). */
936 void nvme_ns_write_cmd(struct nvme_command *cmd, uint16_t nsid,
937 uint64_t lba, uint32_t count)
939 nvme_ns_rw_cmd(cmd, NVME_OPC_WRITE, nsid, lba, count);
943 void nvme_ns_read_cmd(struct nvme_command *cmd, uint16_t nsid,
944 uint64_t lba, uint32_t count)
946 nvme_ns_rw_cmd(cmd, NVME_OPC_READ, nsid, lba, count);
/* Fill *cmd as a DATASET_MANAGEMENT deallocate (TRIM); cdw10 carries
 * a 0-based range count, cdw11 selects the deallocate attribute. */
950 void nvme_ns_trim_cmd(struct nvme_command *cmd, uint16_t nsid,
953 cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
955 cmd->cdw10 = num_ranges - 1;
956 cmd->cdw11 = NVME_DSM_ATTR_DEALLOCATE;
961 #endif /* __NVME_H__ */