1 /*-
2  * Copyright 2013 Nathan Whitehorn
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/malloc.h>
34 #include <sys/module.h>
35 #include <sys/selinfo.h>
36 #include <sys/bus.h>
37 #include <sys/conf.h>
38 #include <sys/eventhandler.h>
39 #include <sys/rman.h>
40 #include <sys/bus_dma.h>
41 #include <sys/bio.h>
42 #include <sys/ioccom.h>
43 #include <sys/uio.h>
44 #include <sys/proc.h>
45 #include <sys/signalvar.h>
46 #include <sys/sysctl.h>
47 #include <sys/endian.h>
48 #include <sys/vmem.h>
49
50 #include <cam/cam.h>
51 #include <cam/cam_ccb.h>
52 #include <cam/cam_debug.h>
53 #include <cam/cam_periph.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_periph.h>
56 #include <cam/cam_xpt_sim.h>
57 #include <cam/scsi/scsi_all.h>
58 #include <cam/scsi/scsi_message.h>
59
60 #include <dev/ofw/openfirm.h>
61 #include <dev/ofw/ofw_bus.h>
62 #include <dev/ofw/ofw_bus_subr.h>
63
64 #include <machine/bus.h>
65 #include <machine/resource.h>
66
67 #include <powerpc/pseries/phyp-hvcall.h>
68
69 struct vscsi_softc;
70
71 /* VSCSI CRQ format from table 260 of PAPR spec 2.4 (page 760) */
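/*
 * As used by this driver: "valid" is 0x80 for ordinary command/response
 * elements and 0xc0 for transport events (the initialization handshake);
 * "format" selects the payload, 0x01 for an SRP IU and 0x02 for a
 * management datagram (MAD).  On requests iu_data carries the mapped
 * address of the IU; on responses it returns the tag we stored there.
 */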
72 struct vscsi_crq {
73         uint8_t valid;
74         uint8_t format;
75         uint8_t reserved;
76         uint8_t status;
77         uint16_t timeout;
78         uint16_t iu_length;
79         uint64_t iu_data;
80 };
81
82 struct vscsi_xfer {
83         TAILQ_ENTRY(vscsi_xfer) queue;
84         struct vscsi_softc *sc;
85         union ccb *ccb;
86         bus_dmamap_t dmamap;
87         uint64_t tag;
88         
89         vmem_addr_t srp_iu_offset;
90         vmem_size_t srp_iu_size;
91 };
92
93 TAILQ_HEAD(vscsi_xferq, vscsi_xfer);
94
95 struct vscsi_softc {
96         device_t        dev;
97         struct cam_devq *devq;
98         struct cam_sim  *sim;
99         struct cam_path *path;
100         struct mtx io_lock;
101
102         cell_t          unit;
103         int             bus_initialized;
104         int             bus_logged_in;
105         int             max_transactions;
106
107         int             irqid;
108         struct resource *irq;
109         void            *irq_cookie;
110
111         bus_dma_tag_t   crq_tag;
112         struct vscsi_crq *crq_queue;
113         int             n_crqs, cur_crq;
114         bus_dmamap_t    crq_map;
115         bus_addr_t      crq_phys;
116
117         vmem_t          *srp_iu_arena;
118         void            *srp_iu_queue;
119         bus_addr_t      srp_iu_phys;
120
121         bus_dma_tag_t   data_tag;
122
123         struct vscsi_xfer loginxp;
124         struct vscsi_xfer *xfer;
125         struct vscsi_xferq active_xferq;
126         struct vscsi_xferq free_xferq;
127 };
128
129 struct srp_login {
130         uint8_t type;
131         uint8_t reserved[7];
132         uint64_t tag;
133         uint64_t max_cmd_length;
134         uint32_t reserved2;
135         uint16_t buffer_formats;
136         uint8_t flags;
137         uint8_t reserved3[5];
138         uint8_t initiator_port_id[16];
139         uint8_t target_port_id[16];
140 } __packed;
141
142 struct srp_login_rsp {
143         uint8_t type;
144         uint8_t reserved[3];
145         uint32_t request_limit_delta;
146         uint8_t tag;
147         uint32_t max_i_to_t_len;
148         uint32_t max_t_to_i_len;
149         uint16_t buffer_formats;
150         uint8_t flags;
151         /* Some reserved bits follow */
152 } __packed;
153
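/*
 * SRP_CMD IU: a fixed 48-byte header ending at cdb[16], optionally
 * followed by additional CDB bytes and then by the data buffer
 * descriptors, all of which vscsi_scsi_command() appends through
 * data_payload[].
 */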
154 struct srp_cmd {
155         uint8_t type;
156         uint8_t flags1;
157         uint8_t reserved[3];
158         uint8_t formats;
159         uint8_t out_buffer_count;
160         uint8_t in_buffer_count;
161         uint64_t tag;
162         uint32_t reserved2;
163         uint64_t lun;
164         uint8_t reserved3[3];
165         uint8_t additional_cdb;
166         uint8_t cdb[16];
167         uint8_t data_payload[0];
168 } __packed;
169
170 struct srp_rsp {
171         uint8_t type;
172         uint8_t reserved[3];
173         uint32_t request_limit_delta;
174         uint64_t tag;
175         uint16_t reserved2;
176         uint8_t flags;
177         uint8_t status;
178         uint32_t data_out_resid;
179         uint32_t data_in_resid;
180         uint32_t sense_data_len;
181         uint32_t response_data_len;
182         uint8_t data_payload[0];
183 } __packed;
184
185 struct srp_tsk_mgmt {
186         uint8_t type;
187         uint8_t reserved[7];
188         uint64_t tag;
189         uint32_t reserved2;
190         uint64_t lun;
191         uint8_t reserved3[2];
192         uint8_t function;
193         uint8_t reserved4;
194         uint64_t manage_tag;
195         uint64_t reserved5;
196 } __packed;
197
198 /* Message code type */
199 #define SRP_LOGIN_REQ   0x00
200 #define SRP_TSK_MGMT    0x01
201 #define SRP_CMD         0x02
202 #define SRP_I_LOGOUT    0x03
203
204 #define SRP_LOGIN_RSP   0xC0
205 #define SRP_RSP         0xC1
206 #define SRP_LOGIN_REJ   0xC2
207
208 #define SRP_T_LOGOUT    0x80
209 #define SRP_CRED_REQ    0x81
210 #define SRP_AER_REQ     0x82
211
212 #define SRP_CRED_RSP    0x41
213 #define SRP_AER_RSP     0x42
214
215 /* Flags for srp_rsp flags field */
216 #define SRP_RSPVALID    0x01
217 #define SRP_SNSVALID    0x02
218 #define SRP_DOOVER      0x04
219 #define SRP_DOUNDER     0x08
220 #define SRP_DIOVER      0x10
221 #define SRP_DIUNDER     0x20
222
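/* Management datagram (MAD) status codes and opcodes (CRQ format 0x02) */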
223 #define MAD_SUCCESS                     0x00
224 #define MAD_NOT_SUPPORTED               0xf1
225 #define MAD_FAILED                      0xf7
226
227 #define MAD_EMPTY_IU                    0x01
228 #define MAD_ERROR_LOGGING_REQUEST       0x02
229 #define MAD_ADAPTER_INFO_REQUEST        0x03
230 #define MAD_CAPABILITIES_EXCHANGE       0x05
231 #define MAD_PHYS_ADAP_INFO_REQUEST      0x06
232 #define MAD_TAPE_PASSTHROUGH_REQUEST    0x07
233 #define MAD_ENABLE_FAST_FAIL            0x08
234
235 static int      vscsi_probe(device_t);
236 static int      vscsi_attach(device_t);
237 static int      vscsi_detach(device_t);
238 static void     vscsi_cam_action(struct cam_sim *, union ccb *);
239 static void     vscsi_cam_poll(struct cam_sim *);
240 static void     vscsi_intr(void *arg);
241 static void     vscsi_check_response_queue(struct vscsi_softc *sc);
242 static void     vscsi_setup_bus(struct vscsi_softc *sc);
243
244 static void     vscsi_srp_login(struct vscsi_softc *sc);
245 static void     vscsi_crq_load_cb(void *, bus_dma_segment_t *, int, int);
246 static void     vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs,
247                     int nsegs, int err);
248 static void     vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb);
249 static void     vscsi_srp_response(struct vscsi_xfer *, struct vscsi_crq *);
250
251 static devclass_t       vscsi_devclass;
252 static device_method_t  vscsi_methods[] = {
253         DEVMETHOD(device_probe,         vscsi_probe),
254         DEVMETHOD(device_attach,        vscsi_attach),
255         DEVMETHOD(device_detach,        vscsi_detach),
256
257         DEVMETHOD_END
258 };
259 static driver_t vscsi_driver = {
260         "vscsi",
261         vscsi_methods,
262         sizeof(struct vscsi_softc)
263 };
264 DRIVER_MODULE(vscsi, vdevice, vscsi_driver, vscsi_devclass, 0, 0);
265 MALLOC_DEFINE(M_VSCSI, "vscsi", "CAM device queue for VSCSI");
266
267 static int
268 vscsi_probe(device_t dev)
269 {
270
271         if (!ofw_bus_is_compatible(dev, "IBM,v-scsi"))
272                 return (ENXIO);
273
274         device_set_desc(dev, "POWER Hypervisor Virtual SCSI Bus");
275         return (0);
276 }
277
278 static int
279 vscsi_attach(device_t dev)
280 {
281         struct vscsi_softc *sc;
282         struct vscsi_xfer *xp;
283         int error, i;
284
285         sc = device_get_softc(dev);
286         if (sc == NULL)
287                 return (EINVAL);
288
289         sc->dev = dev;
290         mtx_init(&sc->io_lock, "vscsi", NULL, MTX_DEF);
291
292         /* Get properties */
293         OF_getprop(ofw_bus_get_node(dev), "reg", &sc->unit, sizeof(sc->unit));
294
295         /* Setup interrupt */
296         sc->irqid = 0;
297         sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
298             RF_ACTIVE);
299
300         if (!sc->irq) {
301                 device_printf(dev, "Could not allocate IRQ\n");
302                 mtx_destroy(&sc->io_lock);
303                 return (ENXIO);
304         }
305
306         bus_setup_intr(dev, sc->irq, INTR_TYPE_CAM | INTR_MPSAFE |
307             INTR_ENTROPY, NULL, vscsi_intr, sc, &sc->irq_cookie);
308
309         /* Data DMA */
310         error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
311             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
312             256, BUS_SPACE_MAXSIZE_32BIT, 0, busdma_lock_mutex, &sc->io_lock,
313             &sc->data_tag);
314
315         TAILQ_INIT(&sc->active_xferq);
316         TAILQ_INIT(&sc->free_xferq);
317
318         /* First XFER for login data */
319         sc->loginxp.sc = sc;
320         bus_dmamap_create(sc->data_tag, 0, &sc->loginxp.dmamap);
321         TAILQ_INSERT_TAIL(&sc->free_xferq, &sc->loginxp, queue);
322          
323         /* CRQ area */
324         error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
325             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 8*PAGE_SIZE,
326             1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->crq_tag);
327         error = bus_dmamem_alloc(sc->crq_tag, (void **)&sc->crq_queue,
328             BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->crq_map);
329         sc->crq_phys = 0;
330         sc->n_crqs = 0;
331         error = bus_dmamap_load(sc->crq_tag, sc->crq_map, sc->crq_queue,
332             8*PAGE_SIZE, vscsi_crq_load_cb, sc, 0);
333
334         mtx_lock(&sc->io_lock);
335         vscsi_setup_bus(sc);
336         sc->xfer = malloc(sizeof(sc->xfer[0])*sc->max_transactions, M_VSCSI,
337             M_NOWAIT);
338         for (i = 0; i < sc->max_transactions; i++) {
339                 xp = &sc->xfer[i];
340                 xp->sc = sc;
341
342                 error = bus_dmamap_create(sc->data_tag, 0, &xp->dmamap);
343                 if (error) {
344                         device_printf(dev, "Could not create DMA map (%d)\n",
345                             error);
346                         break;
347                 }
348
349                 TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
350         }
351         mtx_unlock(&sc->io_lock);
352
353         /* Allocate CAM bits */
354         if ((sc->devq = cam_simq_alloc(sc->max_transactions)) == NULL)
355                 return (ENOMEM);
356
357         sc->sim = cam_sim_alloc(vscsi_cam_action, vscsi_cam_poll, "vscsi", sc,
358                                 device_get_unit(dev), &sc->io_lock,
359                                 sc->max_transactions, sc->max_transactions,
360                                 sc->devq);
361         if (sc->sim == NULL) {
362                 cam_simq_free(sc->devq);
363                 sc->devq = NULL;
364                 device_printf(dev, "CAM SIM attach failed\n");
365                 return (EINVAL);
366         }
367
368
369         mtx_lock(&sc->io_lock);
370         if (xpt_bus_register(sc->sim, dev, 0) != 0) {
371                 device_printf(dev, "XPT bus registration failed\n");
372                 cam_sim_free(sc->sim, FALSE);
373                 sc->sim = NULL;
374                 cam_simq_free(sc->devq);
375                 sc->devq = NULL;
376                 mtx_unlock(&sc->io_lock);
377                 return (EINVAL);
378         }
379         mtx_unlock(&sc->io_lock);
380
381         return (0);
382 }
383
384 static int
385 vscsi_detach(device_t dev)
386 {
387         struct vscsi_softc *sc;
388
389         sc = device_get_softc(dev);
390         if (sc == NULL)
391                 return (EINVAL);
392
393         if (sc->sim != NULL) {
394                 mtx_lock(&sc->io_lock);
395                 xpt_bus_deregister(cam_sim_path(sc->sim));
396                 cam_sim_free(sc->sim, FALSE);
397                 sc->sim = NULL;
398                 mtx_unlock(&sc->io_lock);
399         }
400
401         if (sc->devq != NULL) {
402                 cam_simq_free(sc->devq);
403                 sc->devq = NULL;
404         }
405         
406         mtx_destroy(&sc->io_lock);
407
408         return (0);
409 }
410
411 static void
412 vscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
413 {
414         struct vscsi_softc *sc = cam_sim_softc(sim);
415
416         mtx_assert(&sc->io_lock, MA_OWNED);
417
418         switch (ccb->ccb_h.func_code) {
419         case XPT_PATH_INQ:
420         {
421                 struct ccb_pathinq *cpi = &ccb->cpi;
422
423                 cpi->version_num = 1;
424                 cpi->hba_inquiry = PI_TAG_ABLE;
425                 cpi->hba_misc = PIM_EXTLUNS;
426                 cpi->target_sprt = 0;
427                 cpi->hba_eng_cnt = 0;
428                 cpi->max_target = 0;
429                 cpi->max_lun = ~(lun_id_t)(0);
430                 cpi->initiator_id = ~0;
431                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
432                 strncpy(cpi->hba_vid, "IBM", HBA_IDLEN);
433                 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
434                 cpi->unit_number = cam_sim_unit(sim);
435                 cpi->bus_id = cam_sim_bus(sim);
436                 cpi->base_transfer_speed = 150000;
437                 cpi->transport = XPORT_SRP;
438                 cpi->transport_version = 0;
439                 cpi->protocol = PROTO_SCSI;
440                 cpi->protocol_version = SCSI_REV_SPC4;
441                 cpi->ccb_h.status = CAM_REQ_CMP;
442                 break;
443         }
444         case XPT_RESET_BUS:
445                 ccb->ccb_h.status = CAM_REQ_CMP;
446                 break;
447         case XPT_RESET_DEV:
448                 ccb->ccb_h.status = CAM_REQ_INPROG;
449                 vscsi_task_management(sc, ccb);
450                 return;
451         case XPT_GET_TRAN_SETTINGS:
452                 ccb->cts.protocol = PROTO_SCSI;
453                 ccb->cts.protocol_version = SCSI_REV_SPC4;
454                 ccb->cts.transport = XPORT_SRP;
455                 ccb->cts.transport_version = 0;
456                 ccb->cts.proto_specific.valid = 0;
457                 ccb->cts.xport_specific.valid = 0;
458                 ccb->ccb_h.status = CAM_REQ_CMP;
459                 break;
460         case XPT_SET_TRAN_SETTINGS:
461                 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
462                 break;
463         case XPT_SCSI_IO:
464         {
465                 struct vscsi_xfer *xp;
466
467                 ccb->ccb_h.status = CAM_REQ_INPROG;
468
469                 xp = TAILQ_FIRST(&sc->free_xferq);
470                 if (xp == NULL)
471                         panic("SCSI queue flooded");
472                 xp->ccb = ccb;
473                 TAILQ_REMOVE(&sc->free_xferq, xp, queue);
474                 TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
475                 bus_dmamap_load_ccb(sc->data_tag, xp->dmamap,
476                     ccb, vscsi_scsi_command, xp, 0);
477
478                 return;
479         }
480         default:
481                 ccb->ccb_h.status = CAM_REQ_INVALID;
482                 break;
483         }
484
485         xpt_done(ccb);
486         return;
487 }
488
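/*
 * Send the SRP login request.  Called from vscsi_setup_bus() with the
 * I/O lock held once the CRQ handshake has completed; the 64-byte login
 * IU is carved from the SRP IU arena like any other request, and the
 * reply is handled by vscsi_login_response().
 */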
489 static void
490 vscsi_srp_login(struct vscsi_softc *sc)
491 {
492         struct vscsi_xfer *xp;
493         struct srp_login *login;
494         struct vscsi_crq crq;
495         int err;
496
497         mtx_assert(&sc->io_lock, MA_OWNED);
498
499         xp = TAILQ_FIRST(&sc->free_xferq);
500         if (xp == NULL)
501                 panic("SCSI queue flooded");
502         xp->ccb = NULL;
503         TAILQ_REMOVE(&sc->free_xferq, xp, queue);
504         TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
505         
506         /* Set up command */
507         xp->srp_iu_size = crq.iu_length = 64;
508         err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
509             M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
510         if (err)
511                 panic("Error during VMEM allocation (%d)", err);
512
513         login = (struct srp_login *)((uint8_t *)xp->sc->srp_iu_queue +
514             (uintptr_t)xp->srp_iu_offset);
515         bzero(login, xp->srp_iu_size);
516         login->type = SRP_LOGIN_REQ;
517         login->tag = (uint64_t)(xp);
518         login->max_cmd_length = htobe64(256);
519         login->buffer_formats = htobe16(0x1 | 0x2); /* Direct and indirect */
520         login->flags = 0;
521
522         /* Create CRQ entry */
523         crq.valid = 0x80;
524         crq.format = 0x01;
525         crq.iu_data = xp->sc->srp_iu_phys + xp->srp_iu_offset;
526         bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
527
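        /*
         * H_SEND_CRQ takes the 16-byte CRQ element by value as two
         * 8-byte arguments, hence the reinterpretation of the structure
         * as a pair of uint64_t words below.
         */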
528         err = phyp_hcall(H_SEND_CRQ, xp->sc->unit, ((uint64_t *)(&crq))[0],
529             ((uint64_t *)(&crq))[1]);
530         if (err != 0)
531                 panic("CRQ send failure (%d)", err);
532 }
533
534 static void
535 vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb)
536 {
537         struct srp_tsk_mgmt *cmd;
538         struct vscsi_xfer *xp;
539         struct vscsi_crq crq;
540         int err;
541
542         mtx_assert(&sc->io_lock, MA_OWNED);
543
544         xp = TAILQ_FIRST(&sc->free_xferq);
545         if (xp == NULL)
546                 panic("SCSI queue flooded");
547         xp->ccb = ccb;
548         TAILQ_REMOVE(&sc->free_xferq, xp, queue);
549         TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
550
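        /*
         * If CAM did not supply a 64-bit LUN, synthesize one.  The
         * encoding appears to be the SAM "flat space" single-level LUN
         * format: address method 01b in the two top bits with the LUN
         * value in the following 14 bits.
         */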
551         if (!(ccb->ccb_h.xflags & CAM_EXTLUN_VALID)) {
552                 ccb->ccb_h.xflags |= CAM_EXTLUN_VALID;
553                 ccb->ccb_h.ext_lun.lun64 = (0x1UL << 62) |
554                     ((uint64_t)ccb->ccb_h.target_lun << 48);
555         }
556
557         xp->srp_iu_size = crq.iu_length = sizeof(*cmd);
558         err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
559             M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
560         if (err)
561                 panic("Error during VMEM allocation (%d)", err);
562
563         cmd = (struct srp_tsk_mgmt *)((uint8_t *)xp->sc->srp_iu_queue +
564             (uintptr_t)xp->srp_iu_offset);
565         bzero(cmd, xp->srp_iu_size);
566         cmd->type = SRP_TSK_MGMT;
567         cmd->tag = (uint64_t)xp;
568         cmd->lun = ccb->ccb_h.ext_lun.lun64;
569
570         switch (ccb->ccb_h.func_code) {
571         case XPT_RESET_DEV:
572                 cmd->function = 0x08;
573                 break;
574         default:
575                 panic("Unimplemented code %d", ccb->ccb_h.func_code);
576                 break;
577         }
578
579         bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);
580
581         /* Create CRQ entry */
582         crq.valid = 0x80;
583         crq.format = 0x01;
584         crq.iu_data = xp->sc->srp_iu_phys + xp->srp_iu_offset;
585
586         err = phyp_hcall(H_SEND_CRQ, xp->sc->unit, ((uint64_t *)(&crq))[0],
587             ((uint64_t *)(&crq))[1]);
588         if (err != 0)
589                 panic("CRQ send failure (%d)", err);
590 }
591
592 static void
593 vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs, int nsegs, int err)
594 {
595         struct vscsi_xfer *xp = xxp;
596         uint8_t *cdb;
597         union ccb *ccb = xp->ccb;
598         struct srp_cmd *cmd;
599         uint64_t chunk_addr;
600         uint32_t chunk_size;
601         int desc_start, i;
602         struct vscsi_crq crq;
603
604         KASSERT(err == 0, ("DMA error %d\n", err));
605
606         mtx_assert(&xp->sc->io_lock, MA_OWNED);
607
608         cdb = (ccb->ccb_h.flags & CAM_CDB_POINTER) ?
609             ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes;
610
611         if (!(ccb->ccb_h.xflags & CAM_EXTLUN_VALID)) {
612                 ccb->ccb_h.xflags |= CAM_EXTLUN_VALID;
613                 ccb->ccb_h.ext_lun.lun64 = (0x1UL << 62) |
614                     ((uint64_t)ccb->ccb_h.target_lun << 48);
615         }
616
617         /* Command format from Table 20, page 37 of SRP spec */
618         crq.iu_length = 48 + ((nsegs > 1) ? 20 : 16) + 
619             ((ccb->csio.cdb_len > 16) ? (ccb->csio.cdb_len - 16) : 0);
620         xp->srp_iu_size = crq.iu_length;
621         if (nsegs > 1)
622                 xp->srp_iu_size += nsegs*16;
623         xp->srp_iu_size = roundup(xp->srp_iu_size, 16);
624         err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
625             M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
626         if (err)
627                 panic("Error during VMEM allocation (%d)", err);
628
629         cmd = (struct srp_cmd *)((uint8_t *)xp->sc->srp_iu_queue +
630             (uintptr_t)xp->srp_iu_offset);
631         bzero(cmd, xp->srp_iu_size);
632         cmd->type = SRP_CMD;
633         if (ccb->csio.cdb_len > 16)
634                 cmd->additional_cdb = (ccb->csio.cdb_len - 16) << 2;
635         memcpy(cmd->cdb, cdb, ccb->csio.cdb_len);
636
637         cmd->tag = (uint64_t)(xp); /* Let the responder find this again */
638         cmd->lun = ccb->ccb_h.ext_lun.lun64;
639
640         if (nsegs > 1) {
641                 /* Use indirect descriptors */
642                 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
643                 case CAM_DIR_OUT:
644                         cmd->formats = (2 << 4);
645                         break;
646                 case CAM_DIR_IN:
647                         cmd->formats = 2;
648                         break;
649                 default:
650                         panic("Does not support bidirectional commands (%d)",
651                             ccb->ccb_h.flags & CAM_DIR_MASK);
652                         break;
653                 }
654
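                /*
                 * Layout written below: a 16-byte indirect table
                 * descriptor (8-byte address of the segment list, 4-byte
                 * handle left as zero, 4-byte list length), a 4-byte
                 * total transfer length, then one 16-byte direct
                 * descriptor per S/G segment.  The segment list lives in
                 * the same IU, immediately after this header.
                 */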
655                 desc_start = ((ccb->csio.cdb_len > 16) ?
656                     ccb->csio.cdb_len - 16 : 0);
657                 chunk_addr = xp->sc->srp_iu_phys + xp->srp_iu_offset + 20 +
658                     desc_start + sizeof(*cmd);
659                 chunk_size = 16*nsegs;
660                 memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
661                 memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
662                 chunk_size = 0;
663                 for (i = 0; i < nsegs; i++)
664                         chunk_size += segs[i].ds_len;
665                 memcpy(&cmd->data_payload[desc_start+16], &chunk_size, 4);
666                 desc_start += 20;
667                 for (i = 0; i < nsegs; i++) {
668                         chunk_addr = segs[i].ds_addr;
669                         chunk_size = segs[i].ds_len;
670
671                         memcpy(&cmd->data_payload[desc_start + 16*i],
672                             &chunk_addr, 8);
673                         /* Set handle tag to 0 */
674                         memcpy(&cmd->data_payload[desc_start + 16*i + 12],
675                             &chunk_size, 4);
676                 }
677         } else if (nsegs == 1) {
678                 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
679                 case CAM_DIR_OUT:
680                         cmd->formats = (1 << 4);
681                         break;
682                 case CAM_DIR_IN:
683                         cmd->formats = 1;
684                         break;
685                 default:
686                         panic("Does not support bidirectional commands (%d)",
687                             ccb->ccb_h.flags & CAM_DIR_MASK);
688                         break;
689                 }
690
691                 /*
692                  * Memory descriptor:
693                  * 8 byte address
694                  * 4 byte handle
695                  * 4 byte length
696                  */
697
698                 chunk_addr = segs[0].ds_addr;
699                 chunk_size = segs[0].ds_len;
700                 desc_start = ((ccb->csio.cdb_len > 16) ?
701                     ccb->csio.cdb_len - 16 : 0);
702
703                 memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
704                 /* Set handle tag to 0 */
705                 memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
706                 KASSERT(xp->srp_iu_size >= 48 + ((ccb->csio.cdb_len > 16) ?
707                     ccb->csio.cdb_len : 16), ("SRP IU command length"));
708         } else {
709                 cmd->formats = 0;
710         }
711         bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);
712
713         /* Create CRQ entry */
714         crq.valid = 0x80;
715         crq.format = 0x01;
716         crq.iu_data = xp->sc->srp_iu_phys + xp->srp_iu_offset;
717
718         err = phyp_hcall(H_SEND_CRQ, xp->sc->unit, ((uint64_t *)(&crq))[0],
719             ((uint64_t *)(&crq))[1]);
720         if (err != 0)
721                 panic("CRQ send failure (%d)", err);
722 }
723
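/*
 * Deferred load callback for the CRQ/IU DMA area.  The first page of
 * the mapping becomes the CRQ ring; everything beyond it is handed to a
 * vmem arena from which SRP IUs are allocated, so srp_iu_offset values
 * always land past the CRQ page.
 */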
724 static void
725 vscsi_crq_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int err)
726 {
727         struct vscsi_softc *sc = xsc;
728         
729         sc->crq_phys = segs[0].ds_addr;
730         sc->n_crqs = PAGE_SIZE/sizeof(struct vscsi_crq);
731
732         sc->srp_iu_queue = (uint8_t *)(sc->crq_queue);
733         sc->srp_iu_phys = segs[0].ds_addr;
734         sc->srp_iu_arena = vmem_create("VSCSI SRP IU", PAGE_SIZE,
735             segs[0].ds_len - PAGE_SIZE, 16, 0, M_BESTFIT | M_NOWAIT);
736 }
737
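/*
 * Bring up (or reset) the CRQ transport: free any stale CRQ, register
 * the queue with the hypervisor, perform the initialization handshake,
 * exchange the MAD adapter-info datagram, log in via SRP and finally
 * enable the virtual interrupt source.  Called with the I/O lock held;
 * completions are reaped by polling vscsi_check_response_queue().
 */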
738 static void
739 vscsi_setup_bus(struct vscsi_softc *sc)
740 {
741         struct vscsi_crq crq;
742         struct vscsi_xfer *xp;
743         int error;
744
745         struct {
746                 uint32_t type;
747                 uint16_t status;
748                 uint16_t length;
749                 uint64_t tag;
750                 uint64_t buffer;
751                 struct {
752                         char srp_version[8];
753                         char partition_name[96];
754                         uint32_t partition_number;
755                         uint32_t mad_version;
756                         uint32_t os_type;
757                         uint32_t port_max_txu[8];
758                 } payload;
759         } mad_adapter_info;
760
761         bzero(&crq, sizeof(crq));
762
763         /* Init message */
764         crq.valid = 0xc0;
765         crq.format = 0x01;
766
767         do {
768                 error = phyp_hcall(H_FREE_CRQ, sc->unit);
769         } while (error == H_BUSY);
770
771         /* See initialization sequence page 757 */
772         bzero(sc->crq_queue, sc->n_crqs*sizeof(sc->crq_queue[0]));
773         sc->cur_crq = 0;
774         sc->bus_initialized = 0;
775         sc->bus_logged_in = 0;
776         bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
777         error = phyp_hcall(H_REG_CRQ, sc->unit, sc->crq_phys,
778             sc->n_crqs*sizeof(sc->crq_queue[0]));
779         KASSERT(error == 0, ("CRQ registration failed (%d)", error));
780
781         error = phyp_hcall(H_SEND_CRQ, sc->unit, ((uint64_t *)(&crq))[0],
782             ((uint64_t *)(&crq))[1]);
783         if (error != 0)
784                 panic("CRQ setup failure (%d)", error);
785
786         while (sc->bus_initialized == 0)
787                 vscsi_check_response_queue(sc);
788
789         /* Send MAD adapter info */
790         mad_adapter_info.type = MAD_ADAPTER_INFO_REQUEST;
791         mad_adapter_info.status = 0;
792         mad_adapter_info.length = sizeof(mad_adapter_info.payload);
793
794         strcpy(mad_adapter_info.payload.srp_version, "16.a");
795         strcpy(mad_adapter_info.payload.partition_name, "UNKNOWN");
796         mad_adapter_info.payload.partition_number = -1;
797         mad_adapter_info.payload.mad_version = 1;
798         mad_adapter_info.payload.os_type = 2; /* Claim we are Linux */
799         mad_adapter_info.payload.port_max_txu[0] = 0;
800         /* If this fails, we get the defaults above */
801         OF_getprop(OF_finddevice("/"), "ibm,partition-name",
802             mad_adapter_info.payload.partition_name,
803             sizeof(mad_adapter_info.payload.partition_name));
804         OF_getprop(OF_finddevice("/"), "ibm,partition-no",
805             &mad_adapter_info.payload.partition_number,
806             sizeof(mad_adapter_info.payload.partition_number));
807
808         xp = TAILQ_FIRST(&sc->free_xferq);
809         xp->ccb = NULL;
810         TAILQ_REMOVE(&sc->free_xferq, xp, queue);
811         TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
812         xp->srp_iu_size = crq.iu_length = sizeof(mad_adapter_info);
813         vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
814             M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
815         mad_adapter_info.buffer = xp->sc->srp_iu_phys + xp->srp_iu_offset + 24;
816         mad_adapter_info.tag = (uint64_t)xp;
817         memcpy((uint8_t *)xp->sc->srp_iu_queue + (uintptr_t)xp->srp_iu_offset,
818                 &mad_adapter_info, sizeof(mad_adapter_info));
819         crq.valid = 0x80;
820         crq.format = 0x02;
821         crq.iu_data = xp->sc->srp_iu_phys + xp->srp_iu_offset;
822         bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
823         phyp_hcall(H_SEND_CRQ, xp->sc->unit, ((uint64_t *)(&crq))[0],
824             ((uint64_t *)(&crq))[1]);
825
826         while (TAILQ_EMPTY(&sc->free_xferq))
827                 vscsi_check_response_queue(sc);
828
829         /* Send SRP login */
830         vscsi_srp_login(sc);
831         while (sc->bus_logged_in == 0)
832                 vscsi_check_response_queue(sc);
833
834         error = phyp_hcall(H_VIO_SIGNAL, sc->unit, 1); /* Enable interrupts */
835 }
836         
837
838 static void
839 vscsi_intr(void *xsc)
840 {
841         struct vscsi_softc *sc = xsc;
842
843         mtx_lock(&sc->io_lock);
844         vscsi_check_response_queue(sc);
845         mtx_unlock(&sc->io_lock);
846 }
847
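/*
 * Translate an SRP RSP IU back into CAM terms: map the SCSI status,
 * trust only the residual and sense fields that the flags byte marks
 * valid, copy out autosense data when present, and complete the CCB.
 */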
848 static void
849 vscsi_srp_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
850 {
851         union ccb *ccb = xp->ccb;
852         struct vscsi_softc *sc = xp->sc;
853         struct srp_rsp *rsp;
854         uint32_t sense_len;
855
856         /* SRP response packet in original request */
857         rsp = (struct srp_rsp *)((uint8_t *)sc->srp_iu_queue +
858             (uintptr_t)xp->srp_iu_offset);
859         ccb->csio.scsi_status = rsp->status;
860         if (ccb->csio.scsi_status == SCSI_STATUS_OK)
861                 ccb->ccb_h.status = CAM_REQ_CMP;
862         else
863                 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
864 #ifdef NOTYET
865         /* Collect fast fail codes */
866         if (crq->status != 0)
867                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
868 #endif
869
870         if (ccb->ccb_h.status != CAM_REQ_CMP) {
871                 ccb->ccb_h.status |= CAM_DEV_QFRZN;
872                 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
873         }
874
875         if (!(rsp->flags & SRP_RSPVALID))
876                 rsp->response_data_len = 0;
877         if (!(rsp->flags & SRP_SNSVALID))
878                 rsp->sense_data_len = 0;
879         if (!(rsp->flags & (SRP_DOOVER | SRP_DOUNDER)))
880                 rsp->data_out_resid = 0;
881         if (!(rsp->flags & (SRP_DIOVER | SRP_DIUNDER)))
882                 rsp->data_in_resid = 0;
883
884         if (rsp->flags & SRP_SNSVALID) {
885                 bzero(&ccb->csio.sense_data, sizeof(struct scsi_sense_data));
886                 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
887                 sense_len = min(be32toh(rsp->sense_data_len),
888                     ccb->csio.sense_len);
889                 memcpy(&ccb->csio.sense_data,
890                     &rsp->data_payload[be32toh(rsp->response_data_len)],
891                     sense_len);
892                 ccb->csio.sense_resid = ccb->csio.sense_len -
893                     be32toh(rsp->sense_data_len);
894         }
895
896         switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
897         case CAM_DIR_OUT:
898                 ccb->csio.resid = rsp->data_out_resid;
899                 break;
900         case CAM_DIR_IN:
901                 ccb->csio.resid = rsp->data_in_resid;
902                 break;
903         }
904
905         bus_dmamap_sync(sc->data_tag, xp->dmamap, BUS_DMASYNC_POSTREAD);
906         bus_dmamap_unload(sc->data_tag, xp->dmamap);
907         xpt_done(ccb);
908         xp->ccb = NULL;
909 }
910
911 static void
912 vscsi_login_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
913 {
914         struct vscsi_softc *sc = xp->sc;
915         struct srp_login_rsp *rsp;
916
917         /* SRP response packet in original request */
918         rsp = (struct srp_login_rsp *)((uint8_t *)sc->srp_iu_queue +
919             (uintptr_t)xp->srp_iu_offset);
920         KASSERT(be16toh(rsp->buffer_formats) & 0x3,
921             ("target offered neither direct nor indirect SRP buffer format"));
922
923         sc->max_transactions = be32toh(rsp->request_limit_delta);
924         device_printf(sc->dev, "Queue depth %d commands\n",
925             sc->max_transactions);
926         sc->bus_logged_in = 1;
927 }
928
929 static void
930 vscsi_cam_poll(struct cam_sim *sim)
931 {
932         struct vscsi_softc *sc = cam_sim_softc(sim);
933
934         vscsi_check_response_queue(sc);
935 }
936
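/*
 * Drain the CRQ response ring.  The unit's interrupt source is masked
 * via H_VIO_SIGNAL while the ring is walked; each valid element is
 * either a transport event (the initialization handshake) or a command
 * response whose iu_data tag points back at its vscsi_xfer, in which
 * case the IU and transfer slot are recycled.  The interrupt source is
 * re-enabled before returning.
 */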
937 static void
938 vscsi_check_response_queue(struct vscsi_softc *sc)
939 {
940         struct vscsi_crq *crq;
941         struct vscsi_xfer *xp;
942         int code;
943
944         mtx_assert(&sc->io_lock, MA_OWNED);
945
946         phyp_hcall(H_VIO_SIGNAL, sc->unit, 0);
947         bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_POSTREAD);
948
949         while (sc->crq_queue[sc->cur_crq].valid != 0) {
950                 crq = &sc->crq_queue[sc->cur_crq];
951
952                 switch (crq->valid) {
953                 case 0xc0:
954                         if (crq->format == 0x02)
955                                 sc->bus_initialized = 1;
956                         break;
957                 case 0x80:
958                         /* IU data is set to tag pointer (the XP) */
959                         xp = (struct vscsi_xfer *)crq->iu_data;
960
961                         switch (crq->format) {
962                         case 0x01:
963                                 code = *((uint8_t *)sc->srp_iu_queue +
964                                     (uintptr_t)xp->srp_iu_offset);
965                                 switch (code) {
966                                 case SRP_RSP:
967                                         vscsi_srp_response(xp, crq);
968                                         break;
969                                 case SRP_LOGIN_RSP:
970                                         vscsi_login_response(xp, crq);
971                                         break;
972                                 default:
973                                         device_printf(sc->dev, "Unknown SRP "
974                                             "response code %d\n", code);
975                                         break;
976                                 }
977                                 break;
978                         case 0x02:
979                                 /* Ignore management datagrams */
980                                 break;
981                         default:
982                                 panic("Unknown CRQ format %d\n", crq->format);
983                                 break;
984                         }
985                         vmem_free(sc->srp_iu_arena, xp->srp_iu_offset,
986                             xp->srp_iu_size);
987                         TAILQ_REMOVE(&sc->active_xferq, xp, queue);
988                         TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
989                         break;
990                 default:
991                         device_printf(sc->dev,
992                             "Unknown CRQ message type %d\n", crq->valid);
993                         break;
994                 }
995
996                 crq->valid = 0;
997                 sc->cur_crq = (sc->cur_crq + 1) % sc->n_crqs;
998         }
999
1000         bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
1001         phyp_hcall(H_VIO_SIGNAL, sc->unit, 1);
1002 }
1003