/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
35 #include <sys/limits.h>
/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
/*
 * busdma load callback: record the bus address of the first segment
 * into the caller-supplied bus_addr_t pointed to by `arg`.
 * NOTE(review): this extract is elided -- the return type, braces, and
 * the customary `if (error)` early-return guard are not visible here.
 */
43 i40e_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
47 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * Allocate zeroed kernel virtual memory for shared-code use.
 * Returns nonzero (true) when the allocation failed, 0 on success.
 */
52 i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size)
/* M_NOWAIT: may fail rather than sleep; M_ZERO: memory comes back zeroed. */
54 mem->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
55 return(mem->va == NULL);
/* Release memory obtained from i40e_allocate_virt_mem (free(NULL) is a no-op). */
59 i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
61 free(mem->va, M_DEVBUF);
/*
 * Allocate a DMA'able memory region of `size` bytes with the requested
 * `alignment` for shared-code use; fills in mem->tag/va/map.
 * NOTE(review): this extract is elided -- local declarations, several
 * bus_dma_tag_create() arguments, the error checks after each call, the
 * error-unwind labels, and the return statements are not visible here.
 */
68 i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
69 enum i40e_memory_type type __unused, u64 size, u32 alignment)
/* Recover the device_t from the osdep back-pointer kept in the hw struct. */
71 device_t dev = ((struct i40e_osdep *)hw->back)->dev;
/* Create a DMA tag describing the required alignment and size. */
75 err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
76 alignment, 0, /* alignment, bounds */
77 BUS_SPACE_MAXADDR, /* lowaddr */
78 BUS_SPACE_MAXADDR, /* highaddr */
79 NULL, NULL, /* filter, filterarg */
82 size, /* maxsegsize */
83 BUS_DMA_ALLOCNOW, /* flags */
85 NULL, /* lockfuncarg */
89 "i40e_allocate_dma: bus_dma_tag_create failed, "
/* Allocate zeroed DMA-safe memory plus a map for it. */
93 err = bus_dmamem_alloc(mem->tag, (void **)&mem->va,
94 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
97 "i40e_allocate_dma: bus_dmamem_alloc failed, "
/* Load the buffer; the callback records the bus address (args elided here). */
101 err = bus_dmamap_load(mem->tag, mem->map, mem->va,
108 "i40e_allocate_dma: bus_dmamap_load failed, "
/* Make the freshly zeroed contents visible to the device before first use. */
114 bus_dmamap_sync(mem->tag, mem->map,
115 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
/* Error-unwind path: release whatever was acquired before the failure. */
118 bus_dmamem_free(mem->tag, mem->va, mem->map);
120 bus_dma_tag_destroy(mem->tag);
/*
 * Tear down a DMA region created by i40e_allocate_dma_mem:
 * sync for CPU, unload the map, free the memory, destroy the tag.
 * Always returns I40E_SUCCESS.
 */
128 i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
/* Let any outstanding device DMA complete before the CPU reclaims the buffer. */
130 bus_dmamap_sync(mem->tag, mem->map,
131 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
132 bus_dmamap_unload(mem->tag, mem->map);
133 bus_dmamem_free(mem->tag, mem->va, mem->map);
134 bus_dma_tag_destroy(mem->tag);
135 return (I40E_SUCCESS);
/*
 * Initialize a shared-code "spinlock" as a FreeBSD sleep mutex (MTX_DEF).
 * MTX_DUPOK permits holding two locks of this class at once without a
 * witness(4) duplicate-lock warning.
 */
139 i40e_init_spinlock(struct i40e_spinlock *lock)
141 mtx_init(&lock->mutex, "mutex",
142 "ixl spinlock", MTX_DEF | MTX_DUPOK);
/* Acquire the shared-code lock; may sleep until the mutex is available. */
146 i40e_acquire_spinlock(struct i40e_spinlock *lock)
148 mtx_lock(&lock->mutex);
/* Release the shared-code lock taken by i40e_acquire_spinlock. */
152 i40e_release_spinlock(struct i40e_spinlock *lock)
154 mtx_unlock(&lock->mutex);
/* Destroy the mutex; guarded so calling on a never-initialized lock is a no-op. */
158 i40e_destroy_spinlock(struct i40e_spinlock *lock)
160 if (mtx_initialized(&lock->mutex))
161 mtx_destroy(&lock->mutex);
/*
 * NOTE(review): elided fragment of a milliseconds-to-ticks scaling
 * helper (presumably ixl_ms_scale, called by i40e_msec_pause below --
 * confirm against the full file). The two returns look like the
 * hz >= 1000 and hz < 1000 branches of a preprocessor conditional that
 * is not visible here; max(1, ...) guarantees at least one tick.
 */
170 return (x*(hz/1000));
172 return (max(1, x/(1000/hz)));
/*
 * Sleep for `msecs` milliseconds, yielding the CPU when possible.
 * Falls back to a spin delay when sleeping is not allowed: during early
 * boot (`cold`) or when the scheduler is stopped (panic/debugger).
 */
176 i40e_msec_pause(int msecs)
178 if (cold || SCHEDULER_STOPPED())
179 i40e_msec_delay(msecs);
/* Scale ms to ticks via helper instead of computing (msecs * hz) directly. */
181 // ERJ: (msecs * hz) could overflow
182 pause("ixl", ixl_ms_scale(msecs));
186 * Helper function for debug statement printing
/*
 * Debug printf for the Intel shared code: emits only when `mask` is
 * enabled in hw->debug_mask, prefixed with the device name.
 * NOTE(review): this extract is elided -- the va_list handling and the
 * actual vprintf-style call are not visible here.
 */
189 i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
/* Drop messages whose debug category is not enabled for this adapter. */
194 if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
197 dev = ((struct i40e_osdep *)hw->back)->dev;
199 /* Re-implement device_printf() */
200 device_print_prettyname(dev);
/*
 * Map a virtchnl (PF<->VF mailbox) opcode to a printable name for
 * logging/debugging.
 * NOTE(review): this extract is elided -- the return-type line, several
 * case bodies (VERSION, RESET_VF, ADD_VLAN, DEL_VLAN, RSVD, EVENT), and
 * the default/unknown fallback are not visible here.
 */
207 ixl_vc_opcode_str(uint16_t op)
210 case VIRTCHNL_OP_VERSION:
212 case VIRTCHNL_OP_RESET_VF:
214 case VIRTCHNL_OP_GET_VF_RESOURCES:
215 return ("GET_VF_RESOURCES");
216 case VIRTCHNL_OP_CONFIG_TX_QUEUE:
217 return ("CONFIG_TX_QUEUE");
218 case VIRTCHNL_OP_CONFIG_RX_QUEUE:
219 return ("CONFIG_RX_QUEUE");
220 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
221 return ("CONFIG_VSI_QUEUES");
222 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
223 return ("CONFIG_IRQ_MAP");
224 case VIRTCHNL_OP_ENABLE_QUEUES:
225 return ("ENABLE_QUEUES");
226 case VIRTCHNL_OP_DISABLE_QUEUES:
227 return ("DISABLE_QUEUES");
228 case VIRTCHNL_OP_ADD_ETH_ADDR:
229 return ("ADD_ETH_ADDR");
230 case VIRTCHNL_OP_DEL_ETH_ADDR:
231 return ("DEL_ETH_ADDR");
232 case VIRTCHNL_OP_ADD_VLAN:
234 case VIRTCHNL_OP_DEL_VLAN:
236 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
237 return ("CONFIG_PROMISCUOUS_MODE");
238 case VIRTCHNL_OP_GET_STATS:
239 return ("GET_STATS");
240 case VIRTCHNL_OP_RSVD:
242 case VIRTCHNL_OP_EVENT:
244 case VIRTCHNL_OP_CONFIG_RSS_KEY:
245 return ("CONFIG_RSS_KEY");
246 case VIRTCHNL_OP_CONFIG_RSS_LUT:
247 return ("CONFIG_RSS_LUT");
248 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
249 return ("GET_RSS_HENA_CAPS");
250 case VIRTCHNL_OP_SET_RSS_HENA:
251 return ("SET_RSS_HENA");
/*
 * Read a value from the device's PCI configuration space at offset `reg`.
 * NOTE(review): this extract is elided -- the declaration of `value`,
 * the access-width argument, and the return statement are not visible.
 */
258 i40e_read_pci_cfg(struct i40e_hw *hw, u32 reg)
262 value = pci_read_config(((struct i40e_osdep *)hw->back)->dev,
/*
 * Write `value` into the device's PCI configuration space at offset `reg`.
 * NOTE(review): this extract is elided -- the tail of the
 * pci_write_config() call (value and width arguments) is not visible.
 */
269 i40e_write_pci_cfg(struct i40e_hw *hw, u32 reg, u16 value)
271 pci_write_config(((struct i40e_osdep *)hw->back)->dev,