/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2021 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "osdep.h"
#include "ice_rdma.h"
#include "irdma_di_if.h"
#include "irdma_main.h"
#include <sys/gsb_crc32.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <net/route/nhop.h>
/* Additional QP debugging option. Keep false unless needed. */
bool irdma_upload_context = false;
inline u32
irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg)
{
	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}
inline void
irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value)
{
	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_4(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}
inline u64
irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg)
{
	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}
inline void
irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
{
	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_8(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}
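
/*
 * Usage sketch (illustrative only): the accessors above wrap bus_space
 * reads/writes of the BAR mapped in struct irdma_dev_ctx, with a bounds
 * KASSERT that fires only on INVARIANTS kernels and compiles away on
 * production builds. The register offset below is a placeholder, not a
 * real hardware offset:
 *
 *	u32 val = irdma_rd32(dev_ctx, 0x0);
 *	irdma_wr32(dev_ctx, 0x0, val);
 */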
int
irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->res_type = ICE_RDMA_QSET_ALLOC;
	res->qsets.qs_handle = tc_node->qs_handle;
	res->qsets.tc = tc_node->traffic_class;
	res->qsets.vsi_id = vsi->vsi_idx;

	IRDMA_DI_REQ_HANDLER(peer, &req);

	tc_node->l2_sched_node_id = res->qsets.teid;
	vsi->qos[tc_node->user_pri].l2_sched_node_id =
	    res->qsets.teid;

	return (0);
}
void
irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->res_allocated = 1;
	res->res_type = ICE_RDMA_QSET_FREE;
	res->qsets.vsi_id = vsi->vsi_idx;
	res->qsets.teid = tc_node->l2_sched_node_id;
	res->qsets.qs_handle = tc_node->qs_handle;

	IRDMA_DI_REQ_HANDLER(peer, &req);
}
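
/*
 * Note on the two qset calls above: both directions use the same
 * ICE_RDMA_EVENT_QSET_REGISTER request type; the ice(4) peer distinguishes
 * allocation from free by res_type (ICE_RDMA_QSET_ALLOC vs
 * ICE_RDMA_QSET_FREE) and the res_allocated flag carried in the request.
 */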
void *
hw_to_dev(struct irdma_hw *hw)
{
	struct irdma_pci_f *rf;

	rf = container_of(hw, struct irdma_pci_f, hw);
	return (rf->pcidev);
}
/*
 * FreeBSD computes the MPA CRC directly via calculate_crc32c(), so no
 * per-session hash descriptor state is needed; these remain stubs to
 * satisfy the shared irdma OS abstraction.
 */
void
irdma_free_hash_desc(void *desc)
{
}

int
irdma_init_hash_desc(void **desc)
{
	return (0);
}
int
irdma_ieq_check_mpacrc(void *desc,
		       void *addr, u32 len, u32 val)
{
	u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff;
	int ret_code = 0;

	if (crc != val) {
		irdma_pr_err("mpa crc check fail %x %x\n", crc, val);
		ret_code = -EINVAL;
	}
	printf("%s: result crc=%x value=%x\n", __func__, crc, val);

	return (ret_code);
}
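
/*
 * The 0xffffffff seed and final XOR above give the standard CRC32C
 * (Castagnoli) presentation used by the MPA protocol. Illustrative
 * caller-side check; "fpdu", "fpdu_len" and "rcv_crc" are hypothetical
 * names, not driver symbols:
 *
 *	if (irdma_ieq_check_mpacrc(NULL, fpdu, fpdu_len, rcv_crc))
 *		;	// bad CRC: drop the FPDU or tear down the stream
 */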
/**
 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	struct ifaddr *ifa, *tmp;
	struct sockaddr_in6 *sin6;
	u32 local_ipaddr6[4];
	u8 *mac_addr;
	char ip6buf[INET6_ADDRSTRLEN];

	if_addr_rlock(ifp);
	IRDMA_TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, tmp) {
		sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
		if (sin6->sin6_family != AF_INET6)
			continue;

		irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);
		mac_addr = IF_LLADDR(ifp);
		printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
		       __func__, __LINE__,
		       ip6_sprintf(ip6buf, &sin6->sin6_addr),
		       mac_addr[0], mac_addr[1], mac_addr[2],
		       mac_addr[3], mac_addr[4], mac_addr[5]);

		irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
				       IRDMA_ARP_ADD);
	}
	if_addr_runlock(ifp);
}
/**
 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	struct ifaddr *ifa;
	struct sockaddr_in *sin;
	u32 ip_addr[4] = {};
	u8 *mac_addr;

	if_addr_rlock(ifp);
	IRDMA_TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		sin = (struct sockaddr_in *)ifa->ifa_addr;
		if (sin->sin_family != AF_INET)
			continue;

		ip_addr[0] = ntohl(sin->sin_addr.s_addr);
		mac_addr = IF_LLADDR(ifp);
		printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
		       __func__, __LINE__,
		       ip_addr[0] >> 24,
		       (ip_addr[0] >> 16) & 0xFF,
		       (ip_addr[0] >> 8) & 0xFF,
		       ip_addr[0] & 0xFF,
		       mac_addr[0], mac_addr[1], mac_addr[2],
		       mac_addr[3], mac_addr[4], mac_addr[5]);

		irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
				       IRDMA_ARP_ADD);
	}
	if_addr_runlock(ifp);
}
/**
 * irdma_add_ip - add ip addresses
 * @iwdev: irdma device
 *
 * Add ipv4/ipv6 addresses to the arp cache
 */
void
irdma_add_ip(struct irdma_device *iwdev)
{
	struct ifnet *ifp = iwdev->netdev;
	struct ifnet *ifv;
	int i;

	irdma_add_ipv4_addr(iwdev, ifp);
	irdma_add_ipv6_addr(iwdev, ifp);
	for (i = 0; ifp->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
		ifv = VLAN_DEVAT(ifp, i);
		if (!ifv)
			continue;
		irdma_add_ipv4_addr(iwdev, ifv);
		irdma_add_ipv6_addr(iwdev, ifv);
	}
}
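
/*
 * Note: irdma_add_ip() seeds the HW ARP table from the parent netdev and
 * from every VLAN stacked on it, so RDMA connections established over VLAN
 * interfaces resolve without waiting for an address-change event. The
 * ifaddr event handler below keeps the table current afterwards.
 */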
static void
irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event)
{
	struct irdma_pci_f *rf = arg;
	struct ifnet *ifv = NULL;
	struct sockaddr_in *sin;
	struct epoch_tracker et;
	int arp_index = 0, i = 0;
	u32 ip[4] = {};

	if (!ifa || !ifa->ifa_addr || !ifp)
		return;
	if (rf->iwdev->netdev != ifp) {
		for (i = 0; rf->iwdev->netdev->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
			NET_EPOCH_ENTER(et);
			ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
			NET_EPOCH_EXIT(et);
			if (ifv == ifp)
				break;
		}
		if (ifv != ifp)
			return;
	}
	sin = (struct sockaddr_in *)ifa->ifa_addr;

	switch (event) {
	case IFADDR_EVENT_ADD:
		if (sin->sin_family == AF_INET)
			irdma_add_ipv4_addr(rf->iwdev, ifp);
		else if (sin->sin_family == AF_INET6)
			irdma_add_ipv6_addr(rf->iwdev, ifp);
		break;
	case IFADDR_EVENT_DEL:
		if (sin->sin_family == AF_INET) {
			ip[0] = ntohl(sin->sin_addr.s_addr);
		} else if (sin->sin_family == AF_INET6) {
			irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr);
		} else {
			break;
		}
		for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) {
			if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) {
				irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr,
						       rf->arp_table[arp_index].ip_addr,
						       IRDMA_ARP_DELETE);
			}
		}
		break;
	default:
		break;
	}
}
void
irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext,
						       irdma_ifaddrevent_handler,
						       rf,
						       EVENTHANDLER_PRI_ANY);
}
void
irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event);
}
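
/*
 * Lifecycle sketch (hypothetical call sites): registration pairs with
 * deregistration across the device lifetime, e.g.:
 *
 *	irdma_reg_ipaddr_event_cb(rf);     // after rf->iwdev is populated
 *	...
 *	irdma_dereg_ipaddr_event_cb(rf);   // before the iwdev is torn down
 *
 * Deregistering first ensures the handler cannot dereference a stale
 * rf->iwdev while addresses change during teardown.
 */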
static int
irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
		    struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway)
{
	struct nhop_object *nh;

	if (dst_sin->sa_family == AF_INET6)
		nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr, 0, NHR_NONE, 0);
	else
		nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
	if (!nh || (nh->nh_ifp != netdev &&
		    rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev))
		goto rt_not_found;
	*gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false;
	*nexthop = (*gateway) ? &nh->gw_sa : dst_sin;
	*ifp = nh->nh_ifp;

	return (0);

rt_not_found:
	pr_err("irdma: route not found\n");
	return (-ENETUNREACH);
}
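
/*
 * Note: fib4_lookup()/fib6_lookup() return an unreferenced nexthop and are
 * only safe to call inside the network epoch; the caller below
 * (irdma_get_dst_mac) wraps this lookup in NET_EPOCH_ENTER/NET_EPOCH_EXIT
 * for exactly that reason.
 */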
/**
 * irdma_get_dst_mac - get destination mac address
 * @cm_node: connection's node
 * @dst_sin: destination address information
 * @dst_mac: mac address array to return
 */
int
irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac)
{
	struct ifnet *netdev = cm_node->iwdev->netdev;
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
	struct ifnet *ifp;
	struct llentry *lle;
	struct sockaddr *nexthop;
	struct epoch_tracker et;
	bool gateway;
	int err;

	NET_EPOCH_ENTER(et);
	CURVNET_SET_QUIET(vnet);
	err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway);
	if (err)
		goto get_route_fail;

	if (dst_sin->sa_family == AF_INET) {
		err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
	} else if (dst_sin->sa_family == AF_INET6) {
		err = nd6_resolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
	} else {
		err = -EPROTONOSUPPORT;
	}

get_route_fail:
	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
	if (err) {
		pr_err("failed to resolve neighbor address (err=%d)\n",
		       err);
		return (-ENETUNREACH);
	}

	return (0);
}
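
/*
 * Illustrative use (hypothetical caller): resolving the peer MAC for an
 * IPv4 CM node before building the connection context:
 *
 *	struct sockaddr_in dst = {
 *		.sin_len = sizeof(dst),
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = htonl(cm_node->rem_addr[0]),
 *	};
 *	u8 mac[MAX_ADDR_LEN];
 *
 *	if (!irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst, mac))
 *		;	// mac[] now holds the next-hop (or gateway) address
 *
 * This is the pattern irdma_addr_resolve_neigh() implements below.
 */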
/**
 * irdma_addr_resolve_neigh - resolve neighbor address
 * @cm_node: connection's node
 * @dst_ip: remote ip address
 * @arpindex: if there is an arp entry
 */
int
irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node,
			 u32 dst_ip, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in dst_sin = {};
	u32 ip[4] = {};
	u8 dst_mac[MAX_ADDR_LEN];
	int err;

	dst_sin.sin_len = sizeof(dst_sin);
	dst_sin.sin_family = AF_INET;
	dst_sin.sin_port = 0;
	dst_sin.sin_addr.s_addr = htonl(dst_ip);

	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac);
	if (err)
		return (arpindex);

	ip[0] = dst_ip;

	return (irdma_add_arp(iwdev->rf, ip, dst_mac));
}
/**
 * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
 * @cm_node: connection's node
 * @dest: remote ip address
 * @arpindex: if there is an arp entry
 */
int
irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
			      u32 *dest, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in6 dst_addr = {};
	u8 dst_mac[MAX_ADDR_LEN];
	int err;

	dst_addr.sin6_family = AF_INET6;
	dst_addr.sin6_len = sizeof(dst_addr);
	dst_addr.sin6_scope_id = iwdev->netdev->if_index;

	irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
	if (err)
		return (arpindex);

	return (irdma_add_arp(iwdev->rf, dest, dst_mac));
}
int
irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
			    struct irdma_cm_info *cm_info)
{
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
	int arpindex;
	int oldarpindex;

	if ((cm_node->ipv4 &&
	     irdma_ipv4_is_lpb(vnet, cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
	    (!cm_node->ipv4 &&
	     irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr))) {
		cm_node->do_lpb = true;
		arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					   NULL, IRDMA_ARP_RESOLVE);
	} else {
		oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					      NULL, IRDMA_ARP_RESOLVE);
		if (cm_node->ipv4)
			arpindex = irdma_addr_resolve_neigh(cm_node,
							    cm_info->rem_addr[0],
							    oldarpindex);
		else
			arpindex = irdma_addr_resolve_neigh_ipv6(cm_node,
								 cm_info->rem_addr,
								 oldarpindex);
	}

	return (arpindex);
}
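
/*
 * Resolution flow implemented above, in summary:
 *
 *	loopback?  -> mark cm_node->do_lpb and just look up the ARP entry
 *	otherwise  -> remember the current entry (oldarpindex), then attempt
 *	              a fresh kernel neighbor resolution; on failure the old
 *	              index is returned so an existing entry keeps working
 */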
/**
 * irdma_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
void
irdma_add_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_add(&hdl->list, &irdma_handlers);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}
/**
 * irdma_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
void
irdma_del_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}
/**
 * irdma_set_rf_user_cfg_params - apply user configurable settings
 * @rf: RDMA PCI function
 */
void
irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
{
	int en_rem_endpoint_trk = 0;
	int limits_sel = 4;

	rf->en_rem_endpoint_trk = en_rem_endpoint_trk;
	rf->limits_sel = limits_sel;
	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
	/* Enable DCQCN algorithm by default */
	rf->dcqcn_ena = true;
}
/**
 * irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update
 * @arg1: pointer to rf
 * @arg2: unused
 * @oidp: sysctl oid structure
 * @req: sysctl request pointer
 */
static int
irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
{
	struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1;
	int ret;
	u8 dcqcn_ena = rf->dcqcn_ena;

	ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req);
	if ((ret) || (req->newptr == NULL))
		return (ret);
	if (dcqcn_ena == 0)
		rf->dcqcn_ena = false;
	else
		rf->dcqcn_ena = true;

	return (0);
}
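
/*
 * Runtime example (sketch; the exact OID path depends on the sysctl tree
 * and unit number assigned to the device):
 *
 *	# sysctl dev.irdma.0.dcqcn_enable=0
 *
 * Only QPs created (or modified) after the write pick up the new value.
 */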
/**
 * irdma_dcqcn_tunables_init - create tunables for dcqcn settings
 * @rf: RDMA PCI function
 *
 * Create DCQCN related sysctls for the driver.
 * dcqcn_ena is a writeable setting applicable to the next QP creation or
 * modify; all other settings are of RDTUN type (read on driver load) and
 * are applicable only to CQP creation.
 */
void
irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
{
	struct sysctl_oid_list *irdma_sysctl_oid_list;

	irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);

	SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
			OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0,
			irdma_sysctl_dcqcn_update, "A",
			"enables DCQCN algorithm for RoCEv2 on all ports, default=true");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.cc_cfg_valid, 0,
		      "set DCQCN parameters to be valid, default=false");

	rf->dcqcn_params.min_dec_factor = 1;
	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_dec_factor, 0,
		      "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_rate, 0,
		      "set minimum rate limit value, in MBits per second, default=0");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
		      "set number of times to stay in each stage of bandwidth recovery, default=0");

	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
		       "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0");
	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
		       "set number of bytes to transmit before updating CWND in DCQCN mode, default=0");
	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rai_factor, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=0");

	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.hai_factor, 0,
		       "set number of MSS to add to the congestion window in hyperactive increase mode, default=0");

	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rreduce_mperiod, 0,
		       "set minimum time between 2 consecutive rate reductions for a single flow, default=0");
}
/**
 * irdma_dmamap_cb - callback for bus_dmamap_load
 */
static void
irdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;

	*(bus_addr_t *)arg = segs->ds_addr;
}
/**
 * irdma_allocate_dma_mem - allocate dma memory
 * @hw: pointer to hw structure
 * @mem: structure holding memory information
 * @size: requested size
 * @alignment: requested alignment
 */
void *
irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
		       u64 size, u32 alignment)
{
	struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context;
	device_t dev = dev_ctx->dev;
	void *va;
	int ret;

	ret = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				 alignment, 0,		/* alignment, bounds */
				 BUS_SPACE_MAXADDR,	/* lowaddr */
				 BUS_SPACE_MAXADDR,	/* highaddr */
				 NULL, NULL,		/* filter, filterarg */
				 size,			/* maxsize */
				 1,			/* nsegments */
				 size,			/* maxsegsize */
				 BUS_DMA_ALLOCNOW,	/* flags */
				 NULL,			/* lockfunc */
				 NULL,			/* lockfuncarg */
				 &mem->tag);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
			      __func__, ret);
		goto fail_0;
	}
	ret = bus_dmamem_alloc(mem->tag, (void **)&va,
			       BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
			      __func__, ret);
		goto fail_1;
	}
	ret = bus_dmamap_load(mem->tag, mem->map, va, size,
			      irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
			      __func__, ret);
		goto fail_2;
	}
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	mem->va = va;

	return (mem->va);

fail_2:
	bus_dmamem_free(mem->tag, va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;

	return (NULL);
}
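
/*
 * The three-step busdma sequence above (tag -> alloc -> load) is the
 * standard FreeBSD pattern for a single coherent buffer: the tag fixes
 * alignment and a one-segment limit, bus_dmamem_alloc() provides the KVA,
 * and bus_dmamap_load() reports the bus address through irdma_dmamap_cb().
 * Minimal caller sketch (names hypothetical):
 *
 *	struct irdma_dma_mem q = {};
 *
 *	if (!irdma_allocate_dma_mem(hw, &q, 4096, 4096))
 *		return (ENOMEM);
 *	...
 *	irdma_free_dma_mem(hw, &q);
 */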
/**
 * irdma_free_dma_mem - Memory free helper fn
 * @hw: pointer to hw structure
 * @mem: ptr to mem struct to free
 */
void
irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
{
	if (!mem)
		return;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mem->tag, mem->map);
	if (!mem->va)
		return;
	bus_dmamem_free(mem->tag, mem->va, mem->map);
	bus_dma_tag_destroy(mem->tag);
	mem->va = NULL;
}
void
irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
{
	kfree(chunk->bitmapmem.va);
}