/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "nvme_private.h"

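/*
 * Issue an asynchronous NVMe READ for a kernel virtual address buffer.
 * The payload must cover lba_count sectors; cb_fn is invoked with cb_arg
 * when the command completes.
 *
 * A minimal usage sketch (my_read_done and my_arg are hypothetical):
 *
 *	error = nvme_ns_cmd_read(ns, buf, lba, lba_count,
 *	    my_read_done, my_arg);
 *
 * Returns 0 on success or ENOMEM if a request could not be allocated.
 */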
int
nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
    uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
{
        struct nvme_request     *req;

        req = nvme_allocate_request_vaddr(payload,
            lba_count * nvme_ns_get_sector_size(ns), cb_fn, cb_arg);

        if (req == NULL)
                return (ENOMEM);

        nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count);

        nvme_ctrlr_submit_io_request(ns->ctrlr, req);

        return (0);
}

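/*
 * Issue an asynchronous READ described by a struct bio.  The starting
 * LBA and transfer length are derived from bio_offset and bio_bcount,
 * both of which are assumed to be multiples of the namespace sector
 * size (the integer divisions below would otherwise truncate).
 */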
int
nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
        struct nvme_request     *req;
        uint64_t                lba;
        uint64_t                lba_count;

        req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);

        if (req == NULL)
                return (ENOMEM);

        lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
        lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
        nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count);

        nvme_ctrlr_submit_io_request(ns->ctrlr, req);

        return (0);
}

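/*
 * Issue an asynchronous NVMe WRITE for a kernel virtual address buffer;
 * the mirror image of nvme_ns_cmd_read() above.
 */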
int
nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
    uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
{
        struct nvme_request     *req;

        req = nvme_allocate_request_vaddr(payload,
            lba_count * nvme_ns_get_sector_size(ns), cb_fn, cb_arg);

        if (req == NULL)
                return (ENOMEM);

        nvme_ns_write_cmd(&req->cmd, ns->id, lba, lba_count);

        nvme_ctrlr_submit_io_request(ns->ctrlr, req);

        return (0);
}

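/*
 * Issue an asynchronous WRITE described by a struct bio; the mirror
 * image of nvme_ns_cmd_read_bio() above.
 */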
int
nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
        struct nvme_request     *req;
        uint64_t                lba;
        uint64_t                lba_count;

        req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);

        if (req == NULL)
                return (ENOMEM);

        lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
        lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
        nvme_ns_write_cmd(&req->cmd, ns->id, lba, lba_count);

        nvme_ctrlr_submit_io_request(ns->ctrlr, req);

        return (0);
}

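/*
 * Issue a DATASET MANAGEMENT command with the deallocate attribute set
 * (i.e. a TRIM).  The payload must point to an array of num_ranges
 * struct nvme_dsm_range entries describing the LBA ranges to release.
 */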
int
nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
    uint8_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg)
{
        struct nvme_request     *req;
        struct nvme_command     *cmd;

        req = nvme_allocate_request_vaddr(payload,
            num_ranges * sizeof(struct nvme_dsm_range), cb_fn, cb_arg);

        if (req == NULL)
                return (ENOMEM);

        cmd = &req->cmd;
        cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
        cmd->nsid = htole32(ns->id);

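        /*
         * The Number of Ranges (NR) field in cdw10 is zero-based, so a
         * request covering num_ranges ranges is encoded as num_ranges - 1.
         */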
        /* TODO: create a delete command data structure */
        cmd->cdw10 = htole32(num_ranges - 1);
        cmd->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);

        nvme_ctrlr_submit_io_request(ns->ctrlr, req);

        return (0);
}

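/*
 * Issue an asynchronous FLUSH, committing any volatile write cache
 * contents for this namespace to stable media.
 */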
int
nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
{
        struct nvme_request     *req;

        req = nvme_allocate_request_null(cb_fn, cb_arg);

        if (req == NULL)
                return (ENOMEM);

        nvme_ns_flush_cmd(&req->cmd, ns->id);
        nvme_ctrlr_submit_io_request(ns->ctrlr, req);

        return (0);
}

/* 200000 polls of 5us each (see DELAY(5) below) gives a 1 second timeout. */
#define NVD_DUMP_TIMEOUT        200000

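/*
 * Synchronously write one chunk of a kernel crash dump, or, when len is
 * zero, flush the namespace.  This runs in the dump path with interrupts
 * disabled, so completions are reaped by polling the qpair directly.
 */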
int
nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset, size_t len)
{
        struct nvme_completion_poll_status status;
        struct nvme_request *req;
        struct nvme_command *cmd;
        uint64_t lba, lba_count;
        int i;

        status.done = FALSE;
        req = nvme_allocate_request_vaddr(virt, len, nvme_completion_poll_cb,
            &status);
        if (req == NULL)
                return (ENOMEM);

        cmd = &req->cmd;

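        /* A zero-length dump request degenerates into a flush of the namespace. */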
        if (len > 0) {
                lba = offset / nvme_ns_get_sector_size(ns);
                lba_count = len / nvme_ns_get_sector_size(ns);
                nvme_ns_write_cmd(cmd, ns->id, lba, lba_count);
        } else
                nvme_ns_flush_cmd(cmd, ns->id);

        nvme_ctrlr_submit_io_request(ns->ctrlr, req);
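        /*
         * If submission failed outright, the request was never bound to a
         * qpair and there is nothing to poll.
         */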
        if (req->qpair == NULL)
                return (ENXIO);

        i = 0;
        while ((i++ < NVD_DUMP_TIMEOUT) && (status.done == FALSE)) {
                DELAY(5);
                nvme_qpair_process_completions(req->qpair);
        }

        /*
         * Normally, when using the polling interface, we can't return a
         * timeout error, because we don't know when the completion
         * routines would run if the command completed later.  In this
         * case, however, we're taking a system dump: interrupts are
         * turned off and the scheduler isn't running, so nothing else
         * can complete the transaction behind our back.
         */
        if (status.done == FALSE)
                return (ETIMEDOUT);

        return (0);
}