/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/eventhandler.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/in_cksum.h>

#include <net/if.h>
#include <net/if_var.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "gdma_util.h"
#include "mana.h"

static mana_vendor_id_t mana_id_table[] = {
        { PCI_VENDOR_ID_MICROSOFT, PCI_DEV_ID_MANA_VF},
        /* Last entry */
        { 0, 0}
};

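/*
 * BAR0 register access helpers. Each read is followed by a read
 * barrier so that device state observed through these helpers is not
 * reordered with later loads.
 */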
static inline uint32_t
mana_gd_r32(struct gdma_context *g, uint64_t offset)
{
        uint32_t v = bus_space_read_4(g->gd_bus.bar0_t,
            g->gd_bus.bar0_h, offset);

        rmb();
        return (v);
}

#if defined(__amd64__)
static inline uint64_t
mana_gd_r64(struct gdma_context *g, uint64_t offset)
{
        uint64_t v = bus_space_read_8(g->gd_bus.bar0_t,
            g->gd_bus.bar0_h, offset);

        rmb();
        return (v);
}
#else
static inline uint64_t
mana_gd_r64(struct gdma_context *g, uint64_t offset)
{
        uint64_t v;
        uint32_t *vp = (uint32_t *)&v;

        *vp = mana_gd_r32(g, offset);
        *(vp + 1) = mana_gd_r32(g, offset + 4);
        rmb();
        return (v);
}
#endif

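/*
 * Ask the device for its resource limits, then clamp the driver's idea
 * of usable MSI-X vectors and queue counts to what the CPU count and
 * the hardware both allow.
 */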
static int
mana_gd_query_max_resources(device_t dev)
{
        struct gdma_context *gc = device_get_softc(dev);
        struct gdma_query_max_resources_resp resp = {};
        struct gdma_general_req req = {};
        int err;

        mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
            sizeof(req), sizeof(resp));

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                device_printf(gc->dev,
                    "Failed to query resource info: %d, 0x%x\n",
                    err, resp.hdr.status);
                return err ? err : EPROTO;
        }

        mana_dbg(NULL, "max_msix %u, max_eq %u, max_cq %u, "
            "max_sq %u, max_rq %u\n",
            resp.max_msix, resp.max_eq, resp.max_cq,
            resp.max_sq, resp.max_rq);

        if (gc->num_msix_usable > resp.max_msix)
                gc->num_msix_usable = resp.max_msix;

        if (gc->num_msix_usable <= 1)
                return ENOSPC;

        gc->max_num_queues = mp_ncpus;
        if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
                gc->max_num_queues = MANA_MAX_NUM_QUEUES;

        if (gc->max_num_queues > resp.max_eq)
                gc->max_num_queues = resp.max_eq;

        if (gc->max_num_queues > resp.max_cq)
                gc->max_num_queues = resp.max_cq;

        if (gc->max_num_queues > resp.max_sq)
                gc->max_num_queues = resp.max_sq;

        if (gc->max_num_queues > resp.max_rq)
                gc->max_num_queues = resp.max_rq;

        return 0;
}

static int
mana_gd_detect_devices(device_t dev)
{
        struct gdma_context *gc = device_get_softc(dev);
        struct gdma_list_devices_resp resp = {};
        struct gdma_general_req req = {};
        struct gdma_dev_id gd_dev;
        uint32_t i, max_num_devs;
        uint16_t dev_type;
        int err;

        mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
            sizeof(resp));

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                device_printf(gc->dev,
                    "Failed to detect devices: %d, 0x%x\n", err,
                    resp.hdr.status);
                return err ? err : EPROTO;
        }

        max_num_devs = min_t(uint32_t, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);

        for (i = 0; i < max_num_devs; i++) {
                gd_dev = resp.devs[i];
                dev_type = gd_dev.type;

                mana_dbg(NULL, "gdma dev %d, type %u\n",
                    i, dev_type);

                /* HWC is already detected in mana_hwc_create_channel(). */
                if (dev_type == GDMA_DEVICE_HWC)
                        continue;

                if (dev_type == GDMA_DEVICE_MANA) {
                        gc->mana.gdma_context = gc;
                        gc->mana.dev_id = gd_dev;
                }
        }

        return gc->mana.dev_id.type == 0 ? ENODEV : 0;
}

int
mana_gd_send_request(struct gdma_context *gc, uint32_t req_len,
    const void *req, uint32_t resp_len, void *resp)
{
        struct hw_channel_context *hwc = gc->hwc.driver_data;

        return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}

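/*
 * Callback for bus_dmamap_load(): record the physical address of the
 * single DMA segment into the caller-provided bus_addr_t.
 */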
static void
mana_gd_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t *paddr = arg;

        if (error)
                return;

        KASSERT(nseg == 1, ("too many segments %d!", nseg));
        *paddr = segs->ds_addr;
}

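/*
 * Allocate DMA-coherent, zeroed queue memory. The length must be a
 * power of two and at least PAGE_SIZE: queue offsets are masked with
 * (queue_size - 1), and the owner-bit overflow detection relies on the
 * memory starting out zeroed (hence BUS_DMA_ZERO below).
 */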
int
mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
    struct gdma_mem_info *gmi)
{
        bus_addr_t dma_handle;
        void *buf;
        int err;

        if (!gc || !gmi)
                return EINVAL;

        if (length < PAGE_SIZE || (length != roundup_pow_of_two(length)))
                return EINVAL;

        err = bus_dma_tag_create(bus_get_dma_tag(gc->dev),      /* parent */
            PAGE_SIZE, 0,               /* alignment, boundary  */
            BUS_SPACE_MAXADDR,          /* lowaddr              */
            BUS_SPACE_MAXADDR,          /* highaddr             */
            NULL, NULL,                 /* filter, filterarg    */
            length,                     /* maxsize              */
            1,                          /* nsegments            */
            length,                     /* maxsegsize           */
            0,                          /* flags                */
            NULL, NULL,                 /* lockfunc, lockfuncarg*/
            &gmi->dma_tag);
        if (err) {
                device_printf(gc->dev,
                    "failed to create dma tag, err: %d\n", err);
                return (err);
        }

        /*
         * Must have BUS_DMA_ZERO flag to clear the dma memory.
         * Otherwise the queue overflow detection mechanism does
         * not work.
         */
        err = bus_dmamem_alloc(gmi->dma_tag, &buf,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &gmi->dma_map);
        if (err) {
                device_printf(gc->dev,
                    "failed to alloc dma mem, err: %d\n", err);
                bus_dma_tag_destroy(gmi->dma_tag);
                return (err);
        }

        err = bus_dmamap_load(gmi->dma_tag, gmi->dma_map, buf,
            length, mana_gd_dma_map_paddr, &dma_handle, BUS_DMA_NOWAIT);
        if (err) {
                device_printf(gc->dev,
                    "failed to load dma mem, err: %d\n", err);
                bus_dmamem_free(gmi->dma_tag, buf, gmi->dma_map);
                bus_dma_tag_destroy(gmi->dma_tag);
                return (err);
        }

        gmi->dma_handle = dma_handle;
        gmi->virt_addr = buf;
        gmi->length = length;

        return 0;
}

void
mana_gd_free_memory(struct gdma_mem_info *gmi)
{
        bus_dmamap_unload(gmi->dma_tag, gmi->dma_map);
        bus_dmamem_free(gmi->dma_tag, gmi->virt_addr, gmi->dma_map);
        bus_dma_tag_destroy(gmi->dma_tag);
}

static int
mana_gd_create_hw_eq(struct gdma_context *gc,
    struct gdma_queue *queue)
{
        struct gdma_create_queue_resp resp = {};
        struct gdma_create_queue_req req = {};
        int err;

        if (queue->type != GDMA_EQ)
                return EINVAL;

        mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
            sizeof(req), sizeof(resp));

        req.hdr.dev_id = queue->gdma_dev->dev_id;
        req.type = queue->type;
        req.pdid = queue->gdma_dev->pdid;
        req.doolbell_id = queue->gdma_dev->doorbell;
        req.gdma_region = queue->mem_info.gdma_region;
        req.queue_size = queue->queue_size;
        req.log2_throttle_limit = queue->eq.log2_throttle_limit;
        req.eq_pci_msix_index = queue->eq.msix_index;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                device_printf(gc->dev,
                    "Failed to create queue: %d, 0x%x\n",
                    err, resp.hdr.status);
                return err ? err : EPROTO;
        }

        queue->id = resp.queue_index;
        queue->eq.disable_needed = true;
        queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;

        return 0;
}

int mana_gd_disable_queue(struct gdma_queue *queue)
{
        struct gdma_context *gc = queue->gdma_dev->gdma_context;
        struct gdma_disable_queue_req req = {};
        struct gdma_general_resp resp = {};
        int err;

        if (queue->type != GDMA_EQ)
                mana_warn(NULL, "Not event queue type 0x%x\n",
                    queue->type);

        mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
            sizeof(req), sizeof(resp));

        req.hdr.dev_id = queue->gdma_dev->dev_id;
        req.type = queue->type;
        req.queue_index = queue->id;
        req.alloc_res_id_on_creation = 1;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                device_printf(gc->dev,
                    "Failed to disable queue: %d, 0x%x\n", err,
                    resp.hdr.status);
                return err ? err : EPROTO;
        }

        return 0;
}

#define DOORBELL_OFFSET_SQ      0x0
#define DOORBELL_OFFSET_RQ      0x400
#define DOORBELL_OFFSET_CQ      0x800
#define DOORBELL_OFFSET_EQ      0xFF8

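/*
 * Write a doorbell entry for the given queue at the fixed offset for
 * its queue type within the doorbell page selected by db_index.
 */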
static void
mana_gd_ring_doorbell(struct gdma_context *gc, uint32_t db_index,
    enum gdma_queue_type q_type, uint32_t qid,
    uint32_t tail_ptr, uint8_t num_req)
{
        union gdma_doorbell_entry e = {};
        void *addr;

        addr = (char *)gc->db_page_base + gc->db_page_size * db_index;
        switch (q_type) {
        case GDMA_EQ:
                e.eq.id = qid;
                e.eq.tail_ptr = tail_ptr;
                e.eq.arm = num_req;

                addr = (char *)addr + DOORBELL_OFFSET_EQ;
                break;

        case GDMA_CQ:
                e.cq.id = qid;
                e.cq.tail_ptr = tail_ptr;
                e.cq.arm = num_req;

                addr = (char *)addr + DOORBELL_OFFSET_CQ;
                break;

        case GDMA_RQ:
                e.rq.id = qid;
                e.rq.tail_ptr = tail_ptr;
                e.rq.wqe_cnt = num_req;

                addr = (char *)addr + DOORBELL_OFFSET_RQ;
                break;

        case GDMA_SQ:
                e.sq.id = qid;
                e.sq.tail_ptr = tail_ptr;

                addr = (char *)addr + DOORBELL_OFFSET_SQ;
                break;

        default:
                mana_warn(NULL, "Invalid queue type 0x%x\n", q_type);
                return;
        }

        /* Ensure all writes are done before ringing the doorbell */
        wmb();

#if defined(__amd64__)
        writeq(addr, e.as_uint64);
#else
        uint32_t *p = (uint32_t *)&e.as_uint64;
        writel(addr, *p);
        writel((char *)addr + 4, *(p + 1));
#endif
}

void
mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
{
        mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
            queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
}

void
mana_gd_ring_cq(struct gdma_queue *cq, uint8_t arm_bit)
{
        struct gdma_context *gc = cq->gdma_dev->gdma_context;

        uint32_t num_cqe = cq->queue_size / GDMA_CQE_SIZE;

        uint32_t head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);

        mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
            head, arm_bit);
}

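/* Dispatch one EQE to its completion-queue or HWC event callback. */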
static void
mana_gd_process_eqe(struct gdma_queue *eq)
{
        uint32_t head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
        struct gdma_context *gc = eq->gdma_dev->gdma_context;
        struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
        union gdma_eqe_info eqe_info;
        enum gdma_eqe_type type;
        struct gdma_event event;
        struct gdma_queue *cq;
        struct gdma_eqe *eqe;
        uint32_t cq_id;

        eqe = &eq_eqe_ptr[head];
        eqe_info.as_uint32 = eqe->eqe_info;
        type = eqe_info.type;

        switch (type) {
        case GDMA_EQE_COMPLETION:
                cq_id = eqe->details[0] & 0xFFFFFF;
                if (cq_id >= gc->max_num_cqs) {
                        mana_warn(NULL,
                            "failed: cq_id %u > max_num_cqs %u\n",
                            cq_id, gc->max_num_cqs);
                        break;
                }

                cq = gc->cq_table[cq_id];
                if (!cq || cq->type != GDMA_CQ || cq->id != cq_id) {
                        mana_warn(NULL,
                            "failed: invalid cq_id %u\n", cq_id);
                        break;
                }

                if (cq->cq.callback)
                        cq->cq.callback(cq->cq.context, cq);

                break;

        case GDMA_EQE_TEST_EVENT:
                gc->test_event_eq_id = eq->id;

                mana_dbg(NULL,
                    "EQE TEST EVENT received for EQ %u\n", eq->id);

                complete(&gc->eq_test_event);
                break;

        case GDMA_EQE_HWC_INIT_EQ_ID_DB:
        case GDMA_EQE_HWC_INIT_DATA:
        case GDMA_EQE_HWC_INIT_DONE:
                if (!eq->eq.callback)
                        break;

                event.type = type;
                memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
                eq->eq.callback(eq->eq.context, eq, &event);
                break;

        default:
                break;
        }
}

static void
mana_gd_process_eq_events(void *arg)
{
        uint32_t owner_bits, new_bits, old_bits;
        union gdma_eqe_info eqe_info;
        struct gdma_eqe *eq_eqe_ptr;
        struct gdma_queue *eq = arg;
        struct gdma_context *gc;
        uint32_t head, num_eqe;
        struct gdma_eqe *eqe;
        int i, j;

        gc = eq->gdma_dev->gdma_context;

        num_eqe = eq->queue_size / GDMA_EQE_SIZE;
        eq_eqe_ptr = eq->queue_mem_ptr;

        bus_dmamap_sync(eq->mem_info.dma_tag, eq->mem_info.dma_map,
            BUS_DMASYNC_POSTREAD);

        /* Process up to 5 EQEs at a time, and update the HW head. */
        for (i = 0; i < 5; i++) {
                eqe = &eq_eqe_ptr[eq->head % num_eqe];
                eqe_info.as_uint32 = eqe->eqe_info;
                owner_bits = eqe_info.owner_bits;

                old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;

                /* No more entries */
                if (owner_bits == old_bits)
                        break;

                new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
                if (owner_bits != new_bits) {
                        /* Something wrong. Log for debugging purpose */
                        device_printf(gc->dev,
                            "EQ %d: overflow detected, "
                            "i = %d, eq->head = %u "
                            "got owner_bits = %u, new_bits = %u "
                            "eqe addr %p, eqe->eqe_info 0x%x, "
                            "eqe type = %x, reserved1 = %x, client_id = %x, "
                            "reserved2 = %x, owner_bits = %x\n",
                            eq->id, i, eq->head,
                            owner_bits, new_bits,
                            eqe, eqe->eqe_info,
                            eqe_info.type, eqe_info.reserved1,
                            eqe_info.client_id, eqe_info.reserved2,
                            eqe_info.owner_bits);

                        uint32_t *eqe_dump = (uint32_t *) eq_eqe_ptr;
                        for (j = 0; j < 20; j++) {
                                device_printf(gc->dev, "%p: %x\t%x\t%x\t%x\n",
                                    &eqe_dump[j * 4], eqe_dump[j * 4],
                                    eqe_dump[j * 4 + 1],
                                    eqe_dump[j * 4 + 2], eqe_dump[j * 4 + 3]);
                        }
                        break;
                }

                mana_gd_process_eqe(eq);

                eq->head++;
        }

        bus_dmamap_sync(eq->mem_info.dma_tag, eq->mem_info.dma_map,
            BUS_DMASYNC_PREREAD);

        head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);

        mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
            head, SET_ARM_BIT);
}

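/*
 * Reserve a free MSI-X slot from the bitmap under the spin lock and
 * attach the EQ event processing handler to that vector.
 */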
static int
mana_gd_register_irq(struct gdma_queue *queue,
    const struct gdma_queue_spec *spec)
{
        struct gdma_dev *gd = queue->gdma_dev;
        struct gdma_irq_context *gic;
        struct gdma_context *gc;
        struct gdma_resource *r;
        unsigned int msi_index;
        int err = 0;

        gc = gd->gdma_context;
        r = &gc->msix_resource;

        mtx_lock_spin(&r->lock_spin);

        msi_index = find_first_zero_bit(r->map, r->size);
        if (msi_index >= r->size) {
                err = ENOSPC;
        } else {
                bitmap_set(r->map, msi_index, 1);
                queue->eq.msix_index = msi_index;
        }

        mtx_unlock_spin(&r->lock_spin);

        if (err)
                return err;

        if (unlikely(msi_index >= gc->num_msix_usable)) {
                device_printf(gc->dev,
                    "chose an invalid msix index %d, usable %d\n",
                    msi_index, gc->num_msix_usable);
                return ENOSPC;
        }

        gic = &gc->irq_contexts[msi_index];

        if (unlikely(gic->handler || gic->arg)) {
                device_printf(gc->dev,
                    "interrupt handler or arg already assigned, "
                    "msix index: %d\n", msi_index);
        }

        gic->arg = queue;

        gic->handler = mana_gd_process_eq_events;

        mana_dbg(NULL, "registered msix index %d vector %d irq %ju\n",
            msi_index, gic->msix_e.vector, rman_get_start(gic->res));

        return 0;
}

static void
mana_gd_deregister_irq(struct gdma_queue *queue)
{
        struct gdma_dev *gd = queue->gdma_dev;
        struct gdma_irq_context *gic;
        struct gdma_context *gc;
        struct gdma_resource *r;
        unsigned int msix_index;

        gc = gd->gdma_context;
        r = &gc->msix_resource;

        /* At most mp_ncpus + 1 interrupts are used. */
        msix_index = queue->eq.msix_index;
        if (unlikely(msix_index >= gc->num_msix_usable))
                return;

        gic = &gc->irq_contexts[msix_index];
        gic->handler = NULL;
        gic->arg = NULL;

        mtx_lock_spin(&r->lock_spin);
        bitmap_clear(r->map, msix_index, 1);
        mtx_unlock_spin(&r->lock_spin);

        queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;

        mana_dbg(NULL, "deregistered msix index %d vector %d irq %ju\n",
            msix_index, gic->msix_e.vector, rman_get_start(gic->res));
}

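/*
 * Ask the device to generate a test EQE on the given EQ and wait up to
 * 30 seconds for it to arrive, verifying that the EQ interrupt path
 * works end to end.
 */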
int
mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
{
        struct gdma_generate_test_event_req req = {};
        struct gdma_general_resp resp = {};
        device_t dev = gc->dev;
        int err;

        sx_xlock(&gc->eq_test_event_sx);

        init_completion(&gc->eq_test_event);
        gc->test_event_eq_id = INVALID_QUEUE_ID;

        mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
            sizeof(req), sizeof(resp));

        req.hdr.dev_id = eq->gdma_dev->dev_id;
        req.queue_index = eq->id;

        err = mana_gd_send_request(gc, sizeof(req), &req,
            sizeof(resp), &resp);
        if (err) {
                device_printf(dev, "test_eq failed: %d\n", err);
                goto out;
        }

        err = EPROTO;

        if (resp.hdr.status) {
                device_printf(dev, "test_eq failed: 0x%x\n",
                    resp.hdr.status);
                goto out;
        }

        if (wait_for_completion_timeout(&gc->eq_test_event, 30 * hz)) {
                device_printf(dev, "test_eq timed out on queue %d\n",
                    eq->id);
                goto out;
        }

        if (eq->id != gc->test_event_eq_id) {
                device_printf(dev,
                    "test_eq got an event on wrong queue %d (%d)\n",
                    gc->test_event_eq_id, eq->id);
                goto out;
        }

        err = 0;
out:
        sx_xunlock(&gc->eq_test_event_sx);
        return err;
}

static void
mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
    struct gdma_queue *queue)
{
        int err;

        if (flush_events) {
                err = mana_gd_test_eq(gc, queue);
                if (err)
                        device_printf(gc->dev,
                            "Failed to flush EQ: %d\n", err);
        }

        mana_gd_deregister_irq(queue);

        if (queue->eq.disable_needed)
                mana_gd_disable_queue(queue);
}

static int mana_gd_create_eq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    bool create_hwq, struct gdma_queue *queue)
{
        struct gdma_context *gc = gd->gdma_context;
        device_t dev = gc->dev;
        uint32_t log2_num_entries;
        int err;

        queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;

        log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);

        if (spec->eq.log2_throttle_limit > log2_num_entries) {
                device_printf(dev,
                    "EQ throttling limit (%lu) > maximum EQE (%u)\n",
                    spec->eq.log2_throttle_limit, log2_num_entries);
                return EINVAL;
        }

        err = mana_gd_register_irq(queue, spec);
        if (err) {
                device_printf(dev, "Failed to register irq: %d\n", err);
                return err;
        }

        queue->eq.callback = spec->eq.callback;
        queue->eq.context = spec->eq.context;
        queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
        queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;

        if (create_hwq) {
                err = mana_gd_create_hw_eq(gc, queue);
                if (err)
                        goto out;

                err = mana_gd_test_eq(gc, queue);
                if (err)
                        goto out;
        }

        return 0;
out:
        device_printf(dev, "Failed to create EQ: %d\n", err);
        mana_gd_destroy_eq(gc, false, queue);
        return err;
}

static void
mana_gd_create_cq(const struct gdma_queue_spec *spec,
    struct gdma_queue *queue)
{
        uint32_t log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);

        queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
        queue->cq.parent = spec->cq.parent_eq;
        queue->cq.context = spec->cq.context;
        queue->cq.callback = spec->cq.callback;
}

static void
mana_gd_destroy_cq(struct gdma_context *gc,
    struct gdma_queue *queue)
{
        uint32_t id = queue->id;

        if (id >= gc->max_num_cqs)
                return;

        if (!gc->cq_table[id])
                return;

        gc->cq_table[id] = NULL;
}

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr)
{
        struct gdma_context *gc = gd->gdma_context;
        struct gdma_mem_info *gmi;
        struct gdma_queue *queue;
        int err;

        queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
        if (!queue)
                return ENOMEM;

        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
        if (err)
                goto free_q;

        queue->head = 0;
        queue->tail = 0;
        queue->queue_mem_ptr = gmi->virt_addr;
        queue->queue_size = spec->queue_size;
        queue->monitor_avl_buf = spec->monitor_avl_buf;
        queue->type = spec->type;
        queue->gdma_dev = gd;

        if (spec->type == GDMA_EQ)
                err = mana_gd_create_eq(gd, spec, false, queue);
        else if (spec->type == GDMA_CQ)
                mana_gd_create_cq(spec, queue);

        if (err)
                goto out;

        *queue_ptr = queue;
        return 0;
out:
        mana_gd_free_memory(gmi);
free_q:
        free(queue, M_DEVBUF);
        return err;
}

static void
mana_gd_destroy_dma_region(struct gdma_context *gc, uint64_t gdma_region)
{
        struct gdma_destroy_dma_region_req req = {};
        struct gdma_general_resp resp = {};
        int err;

        if (gdma_region == GDMA_INVALID_DMA_REGION)
                return;

        mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
            sizeof(resp));
        req.gdma_region = gdma_region;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
            &resp);
        if (err || resp.hdr.status)
                device_printf(gc->dev,
                    "Failed to destroy DMA region: %d, 0x%x\n",
                    err, resp.hdr.status);
}

static int
mana_gd_create_dma_region(struct gdma_dev *gd,
    struct gdma_mem_info *gmi)
{
        unsigned int num_page = gmi->length / PAGE_SIZE;
        struct gdma_create_dma_region_req *req = NULL;
        struct gdma_create_dma_region_resp resp = {};
        struct gdma_context *gc = gd->gdma_context;
        struct hw_channel_context *hwc;
        uint32_t length = gmi->length;
        uint32_t req_msg_size;
        int err;
        int i;

        if (length < PAGE_SIZE || !is_power_of_2(length)) {
                mana_err(NULL, "gmi size incorrect: %u\n", length);
                return EINVAL;
        }

        if (offset_in_page((uint64_t)gmi->virt_addr) != 0) {
                mana_err(NULL, "gmi not page aligned: %p\n",
                    gmi->virt_addr);
                return EINVAL;
        }

        hwc = gc->hwc.driver_data;
        req_msg_size = sizeof(*req) + num_page * sizeof(uint64_t);
        if (req_msg_size > hwc->max_req_msg_size) {
                mana_err(NULL, "req msg size too large: %u, %u\n",
                    req_msg_size, hwc->max_req_msg_size);
                return EINVAL;
        }

        req = malloc(req_msg_size, M_DEVBUF, M_WAITOK | M_ZERO);
        if (!req)
                return ENOMEM;

        mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
            req_msg_size, sizeof(resp));
        req->length = length;
        req->offset_in_page = 0;
        req->gdma_page_type = GDMA_PAGE_TYPE_4K;
        req->page_count = num_page;
        req->page_addr_list_len = num_page;

        for (i = 0; i < num_page; i++)
                req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;

        err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
        if (err)
                goto out;

        if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
                device_printf(gc->dev, "Failed to create DMA region: 0x%x\n",
                    resp.hdr.status);
                err = EPROTO;
                goto out;
        }

        gmi->gdma_region = resp.gdma_region;
out:
        free(req, M_DEVBUF);
        return err;
}

int
mana_gd_create_mana_eq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr)
{
        struct gdma_context *gc = gd->gdma_context;
        struct gdma_mem_info *gmi;
        struct gdma_queue *queue;
        int err;

        if (spec->type != GDMA_EQ)
                return EINVAL;

        queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
        if (!queue)
                return ENOMEM;

        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
        if (err)
                goto free_q;

        err = mana_gd_create_dma_region(gd, gmi);
        if (err)
                goto out;

        queue->head = 0;
        queue->tail = 0;
        queue->queue_mem_ptr = gmi->virt_addr;
        queue->queue_size = spec->queue_size;
        queue->monitor_avl_buf = spec->monitor_avl_buf;
        queue->type = spec->type;
        queue->gdma_dev = gd;

        err = mana_gd_create_eq(gd, spec, true, queue);
        if (err)
                goto out;

        *queue_ptr = queue;
        return 0;
out:
        mana_gd_free_memory(gmi);
free_q:
        free(queue, M_DEVBUF);
        return err;
}

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr)
{
        struct gdma_context *gc = gd->gdma_context;
        struct gdma_mem_info *gmi;
        struct gdma_queue *queue;
        int err;

        if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
            spec->type != GDMA_RQ)
                return EINVAL;

        queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
        if (!queue)
                return ENOMEM;

        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
        if (err)
                goto free_q;

        err = mana_gd_create_dma_region(gd, gmi);
        if (err)
                goto out;

        queue->head = 0;
        queue->tail = 0;
        queue->queue_mem_ptr = gmi->virt_addr;
        queue->queue_size = spec->queue_size;
        queue->monitor_avl_buf = spec->monitor_avl_buf;
        queue->type = spec->type;
        queue->gdma_dev = gd;

        if (spec->type == GDMA_CQ)
                mana_gd_create_cq(spec, queue);

        *queue_ptr = queue;
        return 0;
out:
        mana_gd_free_memory(gmi);
free_q:
        free(queue, M_DEVBUF);
        return err;
}

void
mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
        struct gdma_mem_info *gmi = &queue->mem_info;

        switch (queue->type) {
        case GDMA_EQ:
                mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
                break;

        case GDMA_CQ:
                mana_gd_destroy_cq(gc, queue);
                break;

        case GDMA_RQ:
                break;

        case GDMA_SQ:
                break;

        default:
                device_printf(gc->dev,
                    "Can't destroy unknown queue: type = %d\n",
                    queue->type);
                return;
        }

        mana_gd_destroy_dma_region(gc, gmi->gdma_region);
        mana_gd_free_memory(gmi);
        free(queue, M_DEVBUF);
}

#define OS_MAJOR_DIV            100000
#define OS_BUILD_MOD            1000

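/*
 * osreldate is the numeric __FreeBSD_version, e.g. 1300514; the
 * divisors above split it into major (13), minor (0) and build (514).
 */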
static int
mana_gd_verify_vf_version(device_t dev)
{
        struct gdma_context *gc = device_get_softc(dev);
        struct gdma_verify_ver_resp resp = {};
        struct gdma_verify_ver_req req = {};
        int err;

        mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
            sizeof(req), sizeof(resp));

        req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
        req.protocol_ver_max = GDMA_PROTOCOL_LAST;

        req.drv_ver = 0;        /* Unused */
        req.os_type = 0x30;     /* Other */
        req.os_ver_major = osreldate / OS_MAJOR_DIV;
        req.os_ver_minor = (osreldate % OS_MAJOR_DIV) / OS_BUILD_MOD;
        req.os_ver_build = osreldate % OS_BUILD_MOD;
        strncpy(req.os_ver_str1, ostype, sizeof(req.os_ver_str1) - 1);
        strncpy(req.os_ver_str2, osrelease, sizeof(req.os_ver_str2) - 1);

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                device_printf(gc->dev,
                    "VfVerifyVersionOutput: %d, status=0x%x\n",
                    err, resp.hdr.status);
                return err ? err : EPROTO;
        }

        return 0;
}

int
mana_gd_register_device(struct gdma_dev *gd)
{
        struct gdma_context *gc = gd->gdma_context;
        struct gdma_register_device_resp resp = {};
        struct gdma_general_req req = {};
        int err;

        gd->pdid = INVALID_PDID;
        gd->doorbell = INVALID_DOORBELL;
        gd->gpa_mkey = INVALID_MEM_KEY;

        mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
            sizeof(resp));

        req.hdr.dev_id = gd->dev_id;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                device_printf(gc->dev,
                    "gdma_register_device_resp failed: %d, 0x%x\n",
                    err, resp.hdr.status);
                return err ? err : EPROTO;
        }

        gd->pdid = resp.pdid;
        gd->gpa_mkey = resp.gpa_mkey;
        gd->doorbell = resp.db_id;

        mana_dbg(NULL, "mana device pdid %u, gpa_mkey %u, doorbell %u\n",
            gd->pdid, gd->gpa_mkey, gd->doorbell);

        return 0;
}

void
mana_gd_deregister_device(struct gdma_dev *gd)
{
        struct gdma_context *gc = gd->gdma_context;
        struct gdma_general_resp resp = {};
        struct gdma_general_req req = {};
        int err;

        if (gd->pdid == INVALID_PDID)
                return;

        mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
            sizeof(resp));

        req.hdr.dev_id = gd->dev_id;

        err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
        if (err || resp.hdr.status) {
                device_printf(gc->dev,
                    "Failed to deregister device: %d, 0x%x\n",
                    err, resp.hdr.status);
        }

        gd->pdid = INVALID_PDID;
        gd->doorbell = INVALID_DOORBELL;
        gd->gpa_mkey = INVALID_MEM_KEY;
}

uint32_t
mana_gd_wq_avail_space(struct gdma_queue *wq)
{
        uint32_t used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
        uint32_t wq_size = wq->queue_size;

        if (used_space > wq_size) {
                mana_warn(NULL, "failed: used space %u > queue size %u\n",
                    used_space, wq_size);
        }

        return wq_size - used_space;
}

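/*
 * Return a pointer into the queue buffer for the given offset in basic
 * units; the mask works because the queue size is a power of two.
 */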
uint8_t *
mana_gd_get_wqe_ptr(const struct gdma_queue *wq, uint32_t wqe_offset)
{
        uint32_t offset =
            (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);

        if ((offset + GDMA_WQE_BU_SIZE) > wq->queue_size) {
                mana_warn(NULL, "failed: write end out of queue bound %u, "
                    "queue size %u\n",
                    offset + GDMA_WQE_BU_SIZE, wq->queue_size);
        }

        return (uint8_t *)wq->queue_mem_ptr + offset;
}

static uint32_t
mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
    enum gdma_queue_type q_type,
    uint32_t client_oob_size, uint32_t sgl_data_size,
    uint8_t *wqe_ptr)
{
        bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
        bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
        struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
        uint8_t *ptr;

        memset(header, 0, sizeof(struct gdma_wqe));
        header->num_sge = wqe_req->num_sge;
        header->inline_oob_size_div4 = client_oob_size / sizeof(uint32_t);

        if (oob_in_sgl) {
                if (!pad_data || wqe_req->num_sge < 2) {
                        mana_warn(NULL, "no pad_data or num_sge < 2\n");
                }

                header->client_oob_in_sgl = 1;

                if (pad_data)
                        header->last_vbytes = wqe_req->sgl[0].size;
        }

        if (q_type == GDMA_SQ)
                header->client_data_unit = wqe_req->client_data_unit;

        /*
         * The size of gdma_wqe + client_oob_size must be less than or equal
         * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
         * the queue memory buffer boundary.
         */
        ptr = wqe_ptr + sizeof(header);

        if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
                memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);

                if (client_oob_size > wqe_req->inline_oob_size)
                        memset(ptr + wqe_req->inline_oob_size, 0,
                            client_oob_size - wqe_req->inline_oob_size);
        }

        return sizeof(header) + client_oob_size;
}

static void
mana_gd_write_sgl(struct gdma_queue *wq, uint8_t *wqe_ptr,
    const struct gdma_wqe_request *wqe_req)
{
        uint32_t sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
        const uint8_t *address = (uint8_t *)wqe_req->sgl;
        uint8_t *base_ptr, *end_ptr;
        uint32_t size_to_end;

        base_ptr = wq->queue_mem_ptr;
        end_ptr = base_ptr + wq->queue_size;
        size_to_end = (uint32_t)(end_ptr - wqe_ptr);

        if (size_to_end < sgl_size) {
                memcpy(wqe_ptr, address, size_to_end);

                wqe_ptr = base_ptr;
                address += size_to_end;
                sgl_size -= size_to_end;
        }

        memcpy(wqe_ptr, address, sgl_size);
}

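/*
 * Validate the request, then write the WQE header, inline OOB data and
 * SGL into the queue buffer (wrapping at the end) and advance the head.
 * Ringing the doorbell is left to the caller.
 */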
int
mana_gd_post_work_request(struct gdma_queue *wq,
    const struct gdma_wqe_request *wqe_req,
    struct gdma_posted_wqe_info *wqe_info)
{
        uint32_t client_oob_size = wqe_req->inline_oob_size;
        struct gdma_context *gc;
        uint32_t sgl_data_size;
        uint32_t max_wqe_size;
        uint32_t wqe_size;
        uint8_t *wqe_ptr;

        if (wqe_req->num_sge == 0)
                return EINVAL;

        if (wq->type == GDMA_RQ) {
                if (client_oob_size != 0)
                        return EINVAL;

                client_oob_size = INLINE_OOB_SMALL_SIZE;

                max_wqe_size = GDMA_MAX_RQE_SIZE;
        } else {
                if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
                    client_oob_size != INLINE_OOB_LARGE_SIZE)
                        return EINVAL;

                max_wqe_size = GDMA_MAX_SQE_SIZE;
        }

        sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
        wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
            sgl_data_size, GDMA_WQE_BU_SIZE);
        if (wqe_size > max_wqe_size)
                return EINVAL;

        if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
                gc = wq->gdma_dev->gdma_context;
                device_printf(gc->dev, "unsuccessful flow control!\n");
                return ENOSPC;
        }

        if (wqe_info)
                wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;

        wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
        wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
            sgl_data_size, wqe_ptr);
        if (wqe_ptr >= (uint8_t *)wq->queue_mem_ptr + wq->queue_size)
                wqe_ptr -= wq->queue_size;

        mana_gd_write_sgl(wq, wqe_ptr, wqe_req);

        wq->head += wqe_size / GDMA_WQE_BU_SIZE;

        bus_dmamap_sync(wq->mem_info.dma_tag, wq->mem_info.dma_map,
            BUS_DMASYNC_PREWRITE);

        return 0;
}

int
mana_gd_post_and_ring(struct gdma_queue *queue,
    const struct gdma_wqe_request *wqe_req,
    struct gdma_posted_wqe_info *wqe_info)
{
        struct gdma_context *gc = queue->gdma_dev->gdma_context;
        int err;

        err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
        if (err)
                return err;

        mana_gd_wq_ring_doorbell(gc, queue);

        return 0;
}

static int
mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
{
        unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
        struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
        uint32_t owner_bits, new_bits, old_bits;
        struct gdma_cqe *cqe;

        cqe = &cq_cqe[cq->head % num_cqe];
        owner_bits = cqe->cqe_info.owner_bits;

        old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
        /* Return 0 if no more entries. */
        if (owner_bits == old_bits)
                return 0;

        new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
        /* Return -1 if overflow detected. */
        if (owner_bits != new_bits) {
                mana_warn(NULL,
                    "overflow detected! owner_bits %u != new_bits %u\n",
                    owner_bits, new_bits);
                return -1;
        }

        comp->wq_num = cqe->cqe_info.wq_num;
        comp->is_sq = cqe->cqe_info.is_sq;
        memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);

        return 1;
}

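/*
 * Drain up to num_cqe completions from the CQ. Returns the number of
 * CQEs read, or -1 if the owner bits indicate a queue overflow.
 */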
int
mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
{
        int cqe_idx;
        int ret;

        bus_dmamap_sync(cq->mem_info.dma_tag, cq->mem_info.dma_map,
            BUS_DMASYNC_POSTREAD);

        for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
                ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);

                if (ret < 0) {
                        cq->head -= cqe_idx;
                        return ret;
                }

                if (ret == 0)
                        break;

                cq->head++;
        }

        return cqe_idx;
}

static void
mana_gd_intr(void *arg)
{
        struct gdma_irq_context *gic = arg;

        if (gic->handler) {
                gic->handler(gic->arg);
        }
}

int
mana_gd_alloc_res_map(uint32_t res_avail,
    struct gdma_resource *r, const char *lock_name)
{
        int n = howmany(res_avail, BITS_PER_LONG);

        r->map =
            malloc(n * sizeof(unsigned long), M_DEVBUF, M_WAITOK | M_ZERO);
        if (!r->map)
                return ENOMEM;

        r->size = res_avail;
        mtx_init(&r->lock_spin, lock_name, NULL, MTX_SPIN);

        mana_dbg(NULL,
            "total res %u, total number of unsigned longs %u\n",
            r->size, n);
        return (0);
}

void
mana_gd_free_res_map(struct gdma_resource *r)
{
        if (!r || !r->map)
                return;

        free(r->map, M_DEVBUF);
        r->map = NULL;
        r->size = 0;
}

static void
mana_gd_init_registers(struct gdma_context *gc)
{
        uint64_t bar0_va = rman_get_bushandle(gc->bar0);

        gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;

        gc->db_page_base =
            (void *)(bar0_va + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET));

        gc->shm_base =
            (void *)(bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET));

        mana_dbg(NULL, "db_page_size 0x%x, db_page_base %p,"
            " shm_base %p\n",
            gc->db_page_size, gc->db_page_base, gc->shm_base);
}

static struct resource *
mana_gd_alloc_bar(device_t dev, int bar)
{
        struct resource *res = NULL;
        struct pci_map *pm;
        int rid, type;

        if (bar < 0 || bar > PCIR_MAX_BAR_0)
                goto alloc_bar_out;

        pm = pci_find_bar(dev, PCIR_BAR(bar));
        if (!pm)
                goto alloc_bar_out;

        if (PCI_BAR_IO(pm->pm_value))
                type = SYS_RES_IOPORT;
        else
                type = SYS_RES_MEMORY;

        rid = PCIR_BAR(bar);
        res = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE);
#if defined(__amd64__)
        if (res)
                mana_dbg(NULL, "bar %d: rid 0x%x, type 0x%jx,"
                    " handle 0x%jx\n",
                    bar, rid, res->r_bustag, res->r_bushandle);
#endif

alloc_bar_out:
        return (res);
}

static void
mana_gd_free_pci_res(struct gdma_context *gc)
{
        if (!gc || !gc->dev)
                return;

        if (gc->bar0 != NULL) {
                bus_release_resource(gc->dev, SYS_RES_MEMORY,
                    PCIR_BAR(GDMA_BAR0), gc->bar0);
        }

        if (gc->msix != NULL) {
                bus_release_resource(gc->dev, SYS_RES_MEMORY,
                    gc->msix_rid, gc->msix);
        }
}

static int
mana_gd_setup_irqs(device_t dev)
{
        unsigned int max_queues_per_port = mp_ncpus;
        struct gdma_context *gc = device_get_softc(dev);
        struct gdma_irq_context *gic;
        unsigned int max_irqs;
        int nvec;
        int rc, rcc, i;

        if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
                max_queues_per_port = MANA_MAX_NUM_QUEUES;

        /* Need 1 interrupt for the Hardware communication Channel (HWC) */
        max_irqs = max_queues_per_port + 1;

        nvec = max_irqs;
        rc = pci_alloc_msix(dev, &nvec);
        if (unlikely(rc != 0)) {
                device_printf(dev,
                    "Failed to allocate MSIX, vectors %d, error: %d\n",
                    nvec, rc);
                rc = ENOSPC;
                goto err_setup_irq_alloc;
        }

        if (nvec != max_irqs) {
                if (nvec <= 1) {
                        device_printf(dev,
                            "Not enough number of MSI-x allocated: %d\n",
                            nvec);
                        rc = ENOSPC;
                        goto err_setup_irq_release;
                }
                device_printf(dev, "Allocated only %d MSI-x (%d requested)\n",
                    nvec, max_irqs);
        }

        gc->irq_contexts = malloc(nvec * sizeof(struct gdma_irq_context),
            M_DEVBUF, M_WAITOK | M_ZERO);
        if (!gc->irq_contexts) {
                rc = ENOMEM;
                goto err_setup_irq_release;
        }

        for (i = 0; i < nvec; i++) {
                gic = &gc->irq_contexts[i];
                gic->msix_e.entry = i;
                /* Vector starts from 1. */
                gic->msix_e.vector = i + 1;
                gic->handler = NULL;
                gic->arg = NULL;

                gic->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                    &gic->msix_e.vector, RF_ACTIVE | RF_SHAREABLE);
                if (unlikely(gic->res == NULL)) {
                        rc = ENOMEM;
                        device_printf(dev, "could not allocate resource "
                            "for irq vector %d\n", gic->msix_e.vector);
                        goto err_setup_irq;
                }

                rc = bus_setup_intr(dev, gic->res,
                    INTR_TYPE_NET | INTR_MPSAFE, NULL, mana_gd_intr,
                    gic, &gic->cookie);
                if (unlikely(rc != 0)) {
                        device_printf(dev, "failed to register interrupt "
                            "handler for irq %ju vector %d: error %d\n",
                            rman_get_start(gic->res), gic->msix_e.vector, rc);
                        goto err_setup_irq;
                }
                gic->requested = true;

                mana_dbg(NULL, "added msix vector %d irq %ju\n",
                    gic->msix_e.vector, rman_get_start(gic->res));
        }

        rc = mana_gd_alloc_res_map(nvec, &gc->msix_resource,
            "gdma msix res lock");
        if (rc != 0) {
                device_printf(dev, "failed to allocate memory "
                    "for msix bitmap\n");
                goto err_setup_irq;
        }

        gc->max_num_msix = nvec;
        gc->num_msix_usable = nvec;

        mana_dbg(NULL, "setup %d msix interrupts\n", nvec);

        return (0);

err_setup_irq:
        for (; i >= 0; i--) {
                gic = &gc->irq_contexts[i];
                rcc = 0;

                /*
                 * If gic->requested is true, we need to free both intr and
                 * resources.
                 */
                if (gic->requested)
                        rcc = bus_teardown_intr(dev, gic->res, gic->cookie);
                if (unlikely(rcc != 0))
                        device_printf(dev, "could not release "
                            "irq vector %d, error: %d\n",
                            gic->msix_e.vector, rcc);

                rcc = 0;
                if (gic->res != NULL) {
                        rcc = bus_release_resource(dev, SYS_RES_IRQ,
                            gic->msix_e.vector, gic->res);

                        if (unlikely(rcc != 0))
                                device_printf(dev, "dev has no parent while "
                                    "releasing resource for irq vector %d\n",
                                    gic->msix_e.vector);
                        gic->requested = false;
                        gic->res = NULL;
                }
        }

        free(gc->irq_contexts, M_DEVBUF);
        gc->irq_contexts = NULL;
err_setup_irq_release:
        pci_release_msi(dev);
err_setup_irq_alloc:
        return (rc);
}

static void
mana_gd_remove_irqs(device_t dev)
{
        struct gdma_context *gc = device_get_softc(dev);
        struct gdma_irq_context *gic;
        int rc, i;

        mana_gd_free_res_map(&gc->msix_resource);

        for (i = 0; i < gc->max_num_msix; i++) {
                gic = &gc->irq_contexts[i];
                if (gic->requested) {
                        rc = bus_teardown_intr(dev, gic->res, gic->cookie);
                        if (unlikely(rc != 0)) {
                                device_printf(dev, "failed to tear down "
                                    "irq vector %d, error: %d\n",
                                    gic->msix_e.vector, rc);
                        }
                        gic->requested = false;
                }

                if (gic->res != NULL) {
                        rc = bus_release_resource(dev, SYS_RES_IRQ,
                            gic->msix_e.vector, gic->res);
                        if (unlikely(rc != 0)) {
                                device_printf(dev, "dev has no parent while "
                                    "releasing resource for irq vector %d\n",
                                    gic->msix_e.vector);
                        }
                        gic->res = NULL;
                }
        }

        gc->max_num_msix = 0;
        gc->num_msix_usable = 0;
        free(gc->irq_contexts, M_DEVBUF);
        gc->irq_contexts = NULL;

        pci_release_msi(dev);
}

static int
mana_gd_probe(device_t dev)
{
        mana_vendor_id_t *ent;
        char adapter_name[60];
        uint16_t pci_vendor_id = 0;
        uint16_t pci_device_id = 0;

        pci_vendor_id = pci_get_vendor(dev);
        pci_device_id = pci_get_device(dev);

        ent = mana_id_table;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id)) {
                        mana_dbg(NULL, "vendor=%x device=%x\n",
                            pci_vendor_id, pci_device_id);

                        sprintf(adapter_name, DEVICE_DESC);
                        device_set_desc_copy(dev, adapter_name);
                        return (BUS_PROBE_DEFAULT);
                }

                ent++;
        }

        return (ENXIO);
}

/**
 * mana_gd_attach - Device Initialization Routine
 * @dev: device information struct
 *
 * Returns 0 on success, or a non-zero error code on failure.
 *
 * mana_gd_attach initializes a GDMA adapter identified by a device
 * structure.
 **/
static int
mana_gd_attach(device_t dev)
{
        struct gdma_context *gc;
        int msix_rid;
        int rc;

        gc = device_get_softc(dev);
        gc->dev = dev;

        pci_enable_io(dev, SYS_RES_IOPORT);
        pci_enable_io(dev, SYS_RES_MEMORY);

        pci_enable_busmaster(dev);

        gc->bar0 = mana_gd_alloc_bar(dev, GDMA_BAR0);
        if (unlikely(gc->bar0 == NULL)) {
                device_printf(dev,
                    "unable to allocate bus resource for bar0!\n");
                rc = ENOMEM;
                goto err_disable_dev;
        }

        /* Store bar0 tag and handle for quick access */
        gc->gd_bus.bar0_t = rman_get_bustag(gc->bar0);
        gc->gd_bus.bar0_h = rman_get_bushandle(gc->bar0);

        /* Map MSI-x vector table */
        msix_rid = pci_msix_table_bar(dev);

        mana_dbg(NULL, "msix_rid 0x%x\n", msix_rid);

        gc->msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &msix_rid, RF_ACTIVE);
        if (unlikely(gc->msix == NULL)) {
                device_printf(dev,
                    "unable to allocate bus resource for msix!\n");
                rc = ENOMEM;
                goto err_free_pci_res;
        }
        gc->msix_rid = msix_rid;

        if (unlikely(gc->gd_bus.bar0_h == 0)) {
                device_printf(dev, "failed to map bar0!\n");
                rc = ENXIO;
                goto err_free_pci_res;
        }

        mana_gd_init_registers(gc);

        mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);

        rc = mana_gd_setup_irqs(dev);
        if (rc)
                goto err_free_pci_res;

        sx_init(&gc->eq_test_event_sx, "gdma test event sx");

        rc = mana_hwc_create_channel(gc);
        if (rc) {
                mana_dbg(NULL, "Failed to create hwc channel\n");
                if (rc == EIO)
                        goto err_clean_up_gdma;
                else
                        goto err_remove_irq;
        }

        rc = mana_gd_verify_vf_version(dev);
        if (rc) {
                mana_dbg(NULL, "Failed to verify vf\n");
                goto err_clean_up_gdma;
        }

        rc = mana_gd_query_max_resources(dev);
        if (rc) {
                mana_dbg(NULL, "Failed to query max resources\n");
                goto err_clean_up_gdma;
        }

        rc = mana_gd_detect_devices(dev);
        if (rc) {
                mana_dbg(NULL, "Failed to detect mana device\n");
                goto err_clean_up_gdma;
        }

        rc = mana_probe(&gc->mana);
        if (rc) {
                mana_dbg(NULL, "Failed to probe mana device\n");
                goto err_clean_up_gdma;
        }

        return (0);

err_clean_up_gdma:
        mana_hwc_destroy_channel(gc);
err_remove_irq:
        mana_gd_remove_irqs(dev);
err_free_pci_res:
        mana_gd_free_pci_res(gc);
err_disable_dev:
        pci_disable_busmaster(dev);

        return (rc);
}

/**
 * mana_gd_detach - Device Removal Routine
 * @dev: device information struct
 *
 * mana_gd_detach is called by the device subsystem to alert the driver
 * that it should release a PCI device.
 **/
static int
mana_gd_detach(device_t dev)
{
        struct gdma_context *gc = device_get_softc(dev);

        mana_remove(&gc->mana);

        mana_hwc_destroy_channel(gc);

        mana_gd_remove_irqs(dev);

        mana_gd_free_pci_res(gc);

        pci_disable_busmaster(dev);

        return (bus_generic_detach(dev));
}

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t mana_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, mana_gd_probe),
        DEVMETHOD(device_attach, mana_gd_attach),
        DEVMETHOD(device_detach, mana_gd_detach),
        DEVMETHOD_END
};

static driver_t mana_driver = {
        "mana", mana_methods, sizeof(struct gdma_context),
};

DRIVER_MODULE(mana, pci, mana_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device", pci, mana, mana_id_table,
    nitems(mana_id_table) - 1);
MODULE_DEPEND(mana, pci, 1, 1, 1);
MODULE_DEPEND(mana, ether, 1, 1, 1);

/*********************************************************************/