1 /******************************************************************************
3 Copyright (c) 2013-2014, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "i40e_osdep.h"
36 #include "i40e_register.h"
37 #include "i40e_type.h"
39 #include "i40e_lan_hmc.h"
40 #include "i40e_prototype.h"
42 /* lan specific interface functions */
45 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
46 * @offset: base address offset needing alignment
48 * Aligns the layer 2 function private memory so it's 512-byte aligned.
50 static u64 i40e_align_l2obj_base(u64 offset)
52 u64 aligned_offset = offset;
54 if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
55 aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
56 (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
58 return aligned_offset;
62 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
63 * @txq_num: number of Tx queues needing backing context
64 * @rxq_num: number of Rx queues needing backing context
65 * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
66 * @fcoe_filt_num: number of FCoE filters needing backing context
68 * Calculates the maximum amount of memory for the function required, based
69 * on the number of resources it must provide context for.
71 u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
72 u32 fcoe_cntx_num, u32 fcoe_filt_num)
76 fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
77 fpm_size = i40e_align_l2obj_base(fpm_size);
79 fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
80 fpm_size = i40e_align_l2obj_base(fpm_size);
82 fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
83 fpm_size = i40e_align_l2obj_base(fpm_size);
85 fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
86 fpm_size = i40e_align_l2obj_base(fpm_size);
/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * (object sizes and maximum counts read from the GLHMC_* registers).
 *
 * Assumptions:
 * - HMC Resource Profile has been selected before calling this function.
 **/
enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
                                        u32 rxq_num, u32 fcoe_cntx_num,
    struct i40e_hmc_obj_info *obj, *full_obj;
    enum i40e_status_code ret_code = I40E_SUCCESS;

    hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
    hw->hmc.hmc_fn_id = hw->pf_id;

    /* allocate memory for hmc_obj */
    ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
            sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
        goto init_lan_hmc_out;
    hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
                      hw->hmc.hmc_obj_virt_mem.va;

    /* The full object will be used to create the LAN HMC SD */
    full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
    full_obj->max_cnt = 0;

    /* Tx queue context information; max_cnt comes from hardware, size is
     * 2^GLHMC_LANTXOBJSZ bytes per object
     */
    obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
    obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
    size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
    obj->size = (u64)1 << size_exp;

    /* validate values requested by driver don't exceed HMC capacity */
    if (txq_num > obj->max_cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
        DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
                  txq_num, obj->max_cnt, ret_code);
        goto init_lan_hmc_out;

    /* aggregate values into the full LAN object for later */
    full_obj->max_cnt += obj->max_cnt;
    full_obj->cnt += obj->cnt;

    /* Rx queue context information; placed immediately after the Tx
     * region, aligned up to the 512-byte object base boundary
     */
    obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
    obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
    obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
                (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
                 hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
    obj->base = i40e_align_l2obj_base(obj->base);
    size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
    obj->size = (u64)1 << size_exp;

    /* validate values requested by driver don't exceed HMC capacity */
    if (rxq_num > obj->max_cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
        DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
                  rxq_num, obj->max_cnt, ret_code);
        goto init_lan_hmc_out;

    /* aggregate values into the full LAN object for later */
    full_obj->max_cnt += obj->max_cnt;
    full_obj->cnt += obj->cnt;

    /* FCoE context information; follows the Rx region */
    obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
    obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
    obj->cnt = fcoe_cntx_num;
    obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
                (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
                 hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
    obj->base = i40e_align_l2obj_base(obj->base);
    size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
    obj->size = (u64)1 << size_exp;

    /* validate values requested by driver don't exceed HMC capacity */
    if (fcoe_cntx_num > obj->max_cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
        DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
                  fcoe_cntx_num, obj->max_cnt, ret_code);
        goto init_lan_hmc_out;

    /* aggregate values into the full LAN object for later */
    full_obj->max_cnt += obj->max_cnt;
    full_obj->cnt += obj->cnt;

    /* FCoE filter information; last region of the LAN FPM space */
    obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
    obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
    obj->cnt = fcoe_filt_num;
    obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
                (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
                 hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
    obj->base = i40e_align_l2obj_base(obj->base);
    size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
    obj->size = (u64)1 << size_exp;

    /* validate values requested by driver don't exceed HMC capacity */
    if (fcoe_filt_num > obj->max_cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
        DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
                  fcoe_filt_num, obj->max_cnt, ret_code);
        goto init_lan_hmc_out;

    /* aggregate values into the full LAN object for later */
    full_obj->max_cnt += obj->max_cnt;
    full_obj->cnt += obj->cnt;

    hw->hmc.first_sd_index = 0;
    hw->hmc.sd_table.ref_cnt = 0;
    l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
    if (NULL == hw->hmc.sd_table.sd_entry) {
        /* one SD entry per 2MB direct backing page, rounded up */
        hw->hmc.sd_table.sd_cnt = (u32)
                       (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
                       I40E_HMC_DIRECT_BP_SIZE;

        /* allocate the sd_entry members in the sd_table */
        ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
                                  (sizeof(struct i40e_hmc_sd_entry) *
                                  hw->hmc.sd_table.sd_cnt));
            goto init_lan_hmc_out;
        hw->hmc.sd_table.sd_entry =
            (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;

    /* store in the LAN full object for later */
    full_obj->size = l2fpm_size;
249 * i40e_remove_pd_page - Remove a page from the page descriptor table
250 * @hw: pointer to the HW structure
251 * @hmc_info: pointer to the HMC configuration information structure
252 * @idx: segment descriptor index to find the relevant page descriptor
255 * 1. Marks the entry in pd table (for paged address mode) invalid
256 * 2. write to register PMPDINV to invalidate the backing page in FV cache
257 * 3. Decrement the ref count for pd_entry
259 * 1. caller can deallocate the memory used by pd after this function
262 static enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
263 struct i40e_hmc_info *hmc_info,
266 enum i40e_status_code ret_code = I40E_SUCCESS;
268 if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
269 ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);
275 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
276 * @hw: pointer to our HW structure
277 * @hmc_info: pointer to the HMC configuration information structure
278 * @idx: the page index
281 * 1. Marks the entry in sd table (for direct address mode) invalid
282 * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set
283 * to 0) and PMSDDATAHIGH to invalidate the sd page
284 * 3. Decrement the ref count for the sd_entry
286 * 1. caller can deallocate the memory used by backing storage after this
289 static enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
290 struct i40e_hmc_info *hmc_info,
293 enum i40e_status_code ret_code = I40E_SUCCESS;
295 if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
296 ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);
/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.  On any mid-loop failure the already-created
 * SD/PD entries are rolled back before returning the error code.
 **/
enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
                                struct i40e_hmc_lan_create_obj_info *info)
    enum i40e_status_code ret_code = I40E_SUCCESS;
    struct i40e_hmc_sd_entry *sd_entry;
    u32 pd_idx1 = 0, pd_lmt1 = 0;
    u32 pd_idx = 0, pd_lmt = 0;
    bool pd_error = FALSE;

    /* sanity-check every pointer and the signature before touching hw */
        ret_code = I40E_ERR_BAD_PTR;
        DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
    if (NULL == info->hmc_info) {
        ret_code = I40E_ERR_BAD_PTR;
        DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
    if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
        ret_code = I40E_ERR_BAD_PTR;
        DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
    /* the requested index range must lie within the object's count */
    if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
        DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
    if ((info->start_idx + info->count) >
        info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
        DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",

    /* find sd index and limit */
    I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
                             info->start_idx, info->count,
    if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
        sd_lmt > info->hmc_info->sd_table.sd_cnt) {
        ret_code = I40E_ERR_INVALID_SD_INDEX;
    I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
                             info->start_idx, info->count, &pd_idx,

    /* This is to cover for cases where you may not want to have an SD with
     * the full 2M memory but something smaller. By not filling out any
     * size, the function will default the SD size to be 2M.
     */
    if (info->direct_mode_sz == 0)
        sd_size = I40E_HMC_DIRECT_BP_SIZE;
        sd_size = info->direct_mode_sz;

    /* check if all the sds are valid. If not, allocate a page and
    for (j = sd_idx; j < sd_lmt; j++) {
        /* update the sd table entry */
        ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
        if (I40E_SUCCESS != ret_code)
        sd_entry = &info->hmc_info->sd_table.sd_entry[j];
        if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
            /* check if all the pds in this sd are valid. If not,
             * allocate a page and initialize it.
             */

            /* find pd_idx and pd_lmt in this sd */
            pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
            pd_lmt1 = min(pd_lmt,
                          ((j + 1) * I40E_HMC_MAX_BP_COUNT));
            for (i = pd_idx1; i < pd_lmt1; i++) {
                /* update the pd table entry */
                ret_code = i40e_add_pd_table_entry(hw,
                if (I40E_SUCCESS != ret_code) {
                    /* remove the backing pages from pd_idx1 to i */
                    while (i && (i > pd_idx1)) {
                        i40e_remove_pd_bp(hw, info->hmc_info,
        if (!sd_entry->valid) {
            sd_entry->valid = TRUE;
            /* program the SD entry into hardware for its address mode */
            switch (sd_entry->entry_type) {
            case I40E_SD_TYPE_PAGED:
                I40E_SET_PF_SD_ENTRY(hw,
                    sd_entry->u.pd_table.pd_page_addr.pa,
                    j, sd_entry->entry_type);
            case I40E_SD_TYPE_DIRECT:
                I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
                                     j, sd_entry->entry_type);
                ret_code = I40E_ERR_INVALID_SD_TYPE;

    /* error path: cleanup for sd entries from j back down to sd_idx */
    while (j && (j > sd_idx)) {
        sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
        switch (sd_entry->entry_type) {
        case I40E_SD_TYPE_PAGED:
            pd_idx1 = max(pd_idx,
                          ((j - 1) * I40E_HMC_MAX_BP_COUNT));
            pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
            for (i = pd_idx1; i < pd_lmt1; i++) {
                i40e_remove_pd_bp(hw, info->hmc_info, i);
            i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
        case I40E_SD_TYPE_DIRECT:
            i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
            ret_code = I40E_ERR_INVALID_SD_TYPE;
/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 *
 * Creates the single "full" LAN object (direct or paged per @model) and
 * then programs the per-object FPM base/count registers.
 **/
enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
                                             enum i40e_hmc_model model)
    struct i40e_hmc_lan_create_obj_info info;
    u8 hmc_fn_id = hw->hmc.hmc_fn_id;
    struct i40e_hmc_obj_info *obj;
    enum i40e_status_code ret_code = I40E_SUCCESS;

    /* Initialize part of the create object info struct */
    info.hmc_info = &hw->hmc;
    info.rsrc_type = I40E_HMC_LAN_FULL;
    info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

    /* Build the SD entry for the LAN objects */
    case I40E_HMC_MODEL_DIRECT_PREFERRED:
    case I40E_HMC_MODEL_DIRECT_ONLY:
        info.entry_type = I40E_SD_TYPE_DIRECT;
        /* Make one big object, a single SD */
        ret_code = i40e_create_lan_hmc_object(hw, &info);
        /* DIRECT_PREFERRED may retry as paged; DIRECT_ONLY must fail */
        if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
        else if (ret_code != I40E_SUCCESS)
            goto configure_lan_hmc_out;
        /* else clause falls through the break */
    case I40E_HMC_MODEL_PAGED_ONLY:
        info.entry_type = I40E_SD_TYPE_PAGED;
        /* Make one big object in the PD table */
        ret_code = i40e_create_lan_hmc_object(hw, &info);
        if (ret_code != I40E_SUCCESS)
            goto configure_lan_hmc_out;
        /* unsupported type */
        ret_code = I40E_ERR_INVALID_SD_TYPE;
        DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
        goto configure_lan_hmc_out;

    /* Configure and program the FPM registers so objects can be created.
     * The BASE registers hold the object's 512-byte-aligned FPM offset
     * divided by 512; the CNT registers hold the object count.
     */

    /* Tx contexts */
    obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
    wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
         (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
    wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

    /* Rx contexts */
    obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
    wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
         (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
    wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

    /* FCoE DDP contexts */
    obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
    wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
         (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
    wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

    /* FCoE filters */
    obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
    wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
         (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
    wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDS and backing storage.  After this function is returned,
 * caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 **/
enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
                                struct i40e_hmc_lan_delete_obj_info *info)
    enum i40e_status_code ret_code = I40E_SUCCESS;
    struct i40e_hmc_pd_table *pd_table;
    u32 pd_idx, pd_lmt, rel_pd_idx;

    /* validate all pointers and the signature before dereferencing */
        ret_code = I40E_ERR_BAD_PTR;
        DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
    if (NULL == info->hmc_info) {
        ret_code = I40E_ERR_BAD_PTR;
        DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
    if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
        ret_code = I40E_ERR_BAD_PTR;
        DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
    if (NULL == info->hmc_info->sd_table.sd_entry) {
        ret_code = I40E_ERR_BAD_PTR;
        DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
    if (NULL == info->hmc_info->hmc_obj) {
        ret_code = I40E_ERR_BAD_PTR;
        DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
    if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
        DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
    if ((info->start_idx + info->count) >
        info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
        ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
        DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",

    /* first remove every valid backing page in the affected PD range */
    I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
                             info->start_idx, info->count, &pd_idx,
    for (j = pd_idx; j < pd_lmt; j++) {
        sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
        /* PDs only exist under paged SDs; skip direct-mode SDs */
        if (I40E_SD_TYPE_PAGED !=
            info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
        rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
            &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
        if (pd_table->pd_entry[rel_pd_idx].valid) {
            ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
            if (I40E_SUCCESS != ret_code)

    /* find sd index and limit */
    I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
                             info->start_idx, info->count,
    if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
        sd_lmt > info->hmc_info->sd_table.sd_cnt) {
        ret_code = I40E_ERR_INVALID_SD_INDEX;

    /* then tear down each still-valid SD entry by its address mode */
    for (i = sd_idx; i < sd_lmt; i++) {
        if (!info->hmc_info->sd_table.sd_entry[i].valid)
        switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
        case I40E_SD_TYPE_DIRECT:
            ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
            if (I40E_SUCCESS != ret_code)
        case I40E_SD_TYPE_PAGED:
            ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
            if (I40E_SUCCESS != ret_code)
660 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
661 * @hw: pointer to the hw structure
663 * This must be called by drivers as they are shutting down and being
664 * removed from the OS.
666 enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
668 struct i40e_hmc_lan_delete_obj_info info;
669 enum i40e_status_code ret_code;
671 info.hmc_info = &hw->hmc;
672 info.rsrc_type = I40E_HMC_LAN_FULL;
676 /* delete the object */
677 ret_code = i40e_delete_lan_hmc_object(hw, &info);
679 /* free the SD table entry for LAN */
680 i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
681 hw->hmc.sd_table.sd_cnt = 0;
682 hw->hmc.sd_table.sd_entry = NULL;
684 /* free memory used for hmc_obj */
685 i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
686 hw->hmc.hmc_obj = NULL;
/* Expands to the (offset, size) initializer pair of a struct member;
 * used to build the context-element tables below.
 */
#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),	\
	FIELD_SIZEOF(struct _struct, _ele)

/* Describes one HMC context field: its location in the driver-side
 * struct (offset/size_of via I40E_HMC_STORE) and its bit position in
 * the hardware context image (width/lsb) — field names inferred from
 * the ce_info accesses in the read/write helpers below.
 */
struct i40e_context_ele {
/* LAN Tx Queue Context
 * Width/LSB values describe the hardware Tx queue context layout
 * (per the controller datasheet — not verifiable from this file).
 * Line 2 of the context image starts at bit 128; line 7 at 7*128.
 */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field      Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
/* LAN Rx Queue Context
 * Width/LSB values describe the hardware Rx queue context layout
 * (per the controller datasheet — not verifiable from this file).
 */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
756 * i40e_write_byte - replace HMC context byte
757 * @hmc_bits: pointer to the HMC memory
758 * @ce_info: a description of the struct to be read from
759 * @src: the struct to be read from
761 static void i40e_write_byte(u8 *hmc_bits,
762 struct i40e_context_ele *ce_info,
765 u8 src_byte, dest_byte, mask;
769 /* copy from the next struct field */
770 from = src + ce_info->offset;
772 /* prepare the bits and mask */
773 shift_width = ce_info->lsb % 8;
774 mask = ((u8)1 << ce_info->width) - 1;
779 /* shift to correct alignment */
780 mask <<= shift_width;
781 src_byte <<= shift_width;
783 /* get the current bits from the target bit string */
784 dest = hmc_bits + (ce_info->lsb / 8);
786 i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
788 dest_byte &= ~mask; /* get the bits not changing */
789 dest_byte |= src_byte; /* add in the new bits */
791 /* put it all back */
792 i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
796 * i40e_write_word - replace HMC context word
797 * @hmc_bits: pointer to the HMC memory
798 * @ce_info: a description of the struct to be read from
799 * @src: the struct to be read from
801 static void i40e_write_word(u8 *hmc_bits,
802 struct i40e_context_ele *ce_info,
810 /* copy from the next struct field */
811 from = src + ce_info->offset;
813 /* prepare the bits and mask */
814 shift_width = ce_info->lsb % 8;
815 mask = ((u16)1 << ce_info->width) - 1;
817 /* don't swizzle the bits until after the mask because the mask bits
818 * will be in a different bit position on big endian machines
820 src_word = *(u16 *)from;
823 /* shift to correct alignment */
824 mask <<= shift_width;
825 src_word <<= shift_width;
827 /* get the current bits from the target bit string */
828 dest = hmc_bits + (ce_info->lsb / 8);
830 i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
832 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
833 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
835 /* put it all back */
836 i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
840 * i40e_write_dword - replace HMC context dword
841 * @hmc_bits: pointer to the HMC memory
842 * @ce_info: a description of the struct to be read from
843 * @src: the struct to be read from
845 static void i40e_write_dword(u8 *hmc_bits,
846 struct i40e_context_ele *ce_info,
854 /* copy from the next struct field */
855 from = src + ce_info->offset;
857 /* prepare the bits and mask */
858 shift_width = ce_info->lsb % 8;
860 /* if the field width is exactly 32 on an x86 machine, then the shift
861 * operation will not work because the SHL instructions count is masked
862 * to 5 bits so the shift will do nothing
864 if (ce_info->width < 32)
865 mask = ((u32)1 << ce_info->width) - 1;
869 /* don't swizzle the bits until after the mask because the mask bits
870 * will be in a different bit position on big endian machines
872 src_dword = *(u32 *)from;
875 /* shift to correct alignment */
876 mask <<= shift_width;
877 src_dword <<= shift_width;
879 /* get the current bits from the target bit string */
880 dest = hmc_bits + (ce_info->lsb / 8);
882 i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
884 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
885 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
887 /* put it all back */
888 i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
892 * i40e_write_qword - replace HMC context qword
893 * @hmc_bits: pointer to the HMC memory
894 * @ce_info: a description of the struct to be read from
895 * @src: the struct to be read from
897 static void i40e_write_qword(u8 *hmc_bits,
898 struct i40e_context_ele *ce_info,
906 /* copy from the next struct field */
907 from = src + ce_info->offset;
909 /* prepare the bits and mask */
910 shift_width = ce_info->lsb % 8;
912 /* if the field width is exactly 64 on an x86 machine, then the shift
913 * operation will not work because the SHL instructions count is masked
914 * to 6 bits so the shift will do nothing
916 if (ce_info->width < 64)
917 mask = ((u64)1 << ce_info->width) - 1;
919 mask = 0xFFFFFFFFFFFFFFFFUL;
921 /* don't swizzle the bits until after the mask because the mask bits
922 * will be in a different bit position on big endian machines
924 src_qword = *(u64 *)from;
927 /* shift to correct alignment */
928 mask <<= shift_width;
929 src_qword <<= shift_width;
931 /* get the current bits from the target bit string */
932 dest = hmc_bits + (ce_info->lsb / 8);
934 i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
936 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
937 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
939 /* put it all back */
940 i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
944 * i40e_read_byte - read HMC context byte into struct
945 * @hmc_bits: pointer to the HMC memory
946 * @ce_info: a description of the struct to be filled
947 * @dest: the struct to be filled
949 static void i40e_read_byte(u8 *hmc_bits,
950 struct i40e_context_ele *ce_info,
957 /* prepare the bits and mask */
958 shift_width = ce_info->lsb % 8;
959 mask = ((u8)1 << ce_info->width) - 1;
961 /* shift to correct alignment */
962 mask <<= shift_width;
964 /* get the current bits from the src bit string */
965 src = hmc_bits + (ce_info->lsb / 8);
967 i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
969 dest_byte &= ~(mask);
971 dest_byte >>= shift_width;
973 /* get the address from the struct field */
974 target = dest + ce_info->offset;
976 /* put it back in the struct */
977 i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
981 * i40e_read_word - read HMC context word into struct
982 * @hmc_bits: pointer to the HMC memory
983 * @ce_info: a description of the struct to be filled
984 * @dest: the struct to be filled
986 static void i40e_read_word(u8 *hmc_bits,
987 struct i40e_context_ele *ce_info,
995 /* prepare the bits and mask */
996 shift_width = ce_info->lsb % 8;
997 mask = ((u16)1 << ce_info->width) - 1;
999 /* shift to correct alignment */
1000 mask <<= shift_width;
1002 /* get the current bits from the src bit string */
1003 src = hmc_bits + (ce_info->lsb / 8);
1005 i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
1007 /* the data in the memory is stored as little endian so mask it
1010 src_word &= ~(CPU_TO_LE16(mask));
1012 /* get the data back into host order before shifting */
1013 dest_word = LE16_TO_CPU(src_word);
1015 dest_word >>= shift_width;
1017 /* get the address from the struct field */
1018 target = dest + ce_info->offset;
1020 /* put it back in the struct */
1021 i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
1025 * i40e_read_dword - read HMC context dword into struct
1026 * @hmc_bits: pointer to the HMC memory
1027 * @ce_info: a description of the struct to be filled
1028 * @dest: the struct to be filled
1030 static void i40e_read_dword(u8 *hmc_bits,
1031 struct i40e_context_ele *ce_info,
1034 u32 dest_dword, mask;
1039 /* prepare the bits and mask */
1040 shift_width = ce_info->lsb % 8;
1042 /* if the field width is exactly 32 on an x86 machine, then the shift
1043 * operation will not work because the SHL instructions count is masked
1044 * to 5 bits so the shift will do nothing
1046 if (ce_info->width < 32)
1047 mask = ((u32)1 << ce_info->width) - 1;
1051 /* shift to correct alignment */
1052 mask <<= shift_width;
1054 /* get the current bits from the src bit string */
1055 src = hmc_bits + (ce_info->lsb / 8);
1057 i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
1059 /* the data in the memory is stored as little endian so mask it
1062 src_dword &= ~(CPU_TO_LE32(mask));
1064 /* get the data back into host order before shifting */
1065 dest_dword = LE32_TO_CPU(src_dword);
1067 dest_dword >>= shift_width;
1069 /* get the address from the struct field */
1070 target = dest + ce_info->offset;
1072 /* put it back in the struct */
1073 i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
1074 I40E_NONDMA_TO_DMA);
1078 * i40e_read_qword - read HMC context qword into struct
1079 * @hmc_bits: pointer to the HMC memory
1080 * @ce_info: a description of the struct to be filled
1081 * @dest: the struct to be filled
1083 static void i40e_read_qword(u8 *hmc_bits,
1084 struct i40e_context_ele *ce_info,
1087 u64 dest_qword, mask;
1092 /* prepare the bits and mask */
1093 shift_width = ce_info->lsb % 8;
1095 /* if the field width is exactly 64 on an x86 machine, then the shift
1096 * operation will not work because the SHL instructions count is masked
1097 * to 6 bits so the shift will do nothing
1099 if (ce_info->width < 64)
1100 mask = ((u64)1 << ce_info->width) - 1;
1102 mask = 0xFFFFFFFFFFFFFFFFUL;
1104 /* shift to correct alignment */
1105 mask <<= shift_width;
1107 /* get the current bits from the src bit string */
1108 src = hmc_bits + (ce_info->lsb / 8);
1110 i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
1112 /* the data in the memory is stored as little endian so mask it
1115 src_qword &= ~(CPU_TO_LE64(mask));
1117 /* get the data back into host order before shifting */
1118 dest_qword = LE64_TO_CPU(src_qword);
1120 dest_qword >>= shift_width;
1122 /* get the address from the struct field */
1123 target = dest + ce_info->offset;
1125 /* put it back in the struct */
1126 i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
1127 I40E_NONDMA_TO_DMA);
1131 * i40e_get_hmc_context - extract HMC context bits
1132 * @context_bytes: pointer to the context bit array
1133 * @ce_info: a description of the struct to be filled
1134 * @dest: the struct to be filled
1136 static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
1137 struct i40e_context_ele *ce_info,
1142 for (f = 0; ce_info[f].width != 0; f++) {
1143 switch (ce_info[f].size_of) {
1145 i40e_read_byte(context_bytes, &ce_info[f], dest);
1148 i40e_read_word(context_bytes, &ce_info[f], dest);
1151 i40e_read_dword(context_bytes, &ce_info[f], dest);
1154 i40e_read_qword(context_bytes, &ce_info[f], dest);
1157 /* nothing to do, just keep going */
1162 return I40E_SUCCESS;
1166 * i40e_clear_hmc_context - zero out the HMC context bits
1167 * @hw: the hardware struct
1168 * @context_bytes: pointer to the context bit array (DMA memory)
1169 * @hmc_type: the type of HMC resource
1171 static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
1173 enum i40e_hmc_lan_rsrc_type hmc_type)
1175 /* clean the bit array */
1176 i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
1179 return I40E_SUCCESS;
1183 * i40e_set_hmc_context - replace HMC context bits
1184 * @context_bytes: pointer to the context bit array
1185 * @ce_info: a description of the struct to be filled
1186 * @dest: the struct to be filled
1188 static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
1189 struct i40e_context_ele *ce_info,
1194 for (f = 0; ce_info[f].width != 0; f++) {
1196 /* we have to deal with each element of the HMC using the
1197 * correct size so that we are correct regardless of the
1198 * endianness of the machine
1200 switch (ce_info[f].size_of) {
1202 i40e_write_byte(context_bytes, &ce_info[f], dest);
1205 i40e_write_word(context_bytes, &ce_info[f], dest);
1208 i40e_write_dword(context_bytes, &ce_info[f], dest);
1211 i40e_write_qword(context_bytes, &ce_info[f], dest);
1216 return I40E_SUCCESS;
1220 * i40e_hmc_get_object_va - retrieves an object's virtual address
1221 * @hmc_info: pointer to i40e_hmc_info struct
1222 * @object_base: pointer to u64 to get the va
1223 * @rsrc_type: the hmc resource type
1224 * @obj_idx: hmc object index
1226 * This function retrieves the object's virtual address from the object
1227 * base pointer. This function is used for LAN Queue contexts.
1230 enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
1232 enum i40e_hmc_lan_rsrc_type rsrc_type,
1235 u32 obj_offset_in_sd, obj_offset_in_pd;
1236 struct i40e_hmc_sd_entry *sd_entry;
1237 struct i40e_hmc_pd_entry *pd_entry;
1238 u32 pd_idx, pd_lmt, rel_pd_idx;
1239 enum i40e_status_code ret_code = I40E_SUCCESS;
1240 u64 obj_offset_in_fpm;
1243 if (NULL == hmc_info) {
1244 ret_code = I40E_ERR_BAD_PTR;
1245 DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
1248 if (NULL == hmc_info->hmc_obj) {
1249 ret_code = I40E_ERR_BAD_PTR;
1250 DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
1253 if (NULL == object_base) {
1254 ret_code = I40E_ERR_BAD_PTR;
1255 DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
1258 if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
1259 ret_code = I40E_ERR_BAD_PTR;
1260 DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
1263 if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
1264 DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
1266 ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
1269 /* find sd index and limit */
1270 I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
1273 sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
1274 obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
1275 hmc_info->hmc_obj[rsrc_type].size * obj_idx;
1277 if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
1278 I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
1280 rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
1281 pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
1282 obj_offset_in_pd = (u32)(obj_offset_in_fpm %
1283 I40E_HMC_PAGED_BP_SIZE);
1284 *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
1286 obj_offset_in_sd = (u32)(obj_offset_in_fpm %
1287 I40E_HMC_DIRECT_BP_SIZE);
1288 *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
1295 * i40e_get_lan_tx_queue_context - return the HMC context for the queue
1296 * @hw: the hardware struct
1297 * @queue: the queue we care about
1298 * @s: the struct to be filled
1300 enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
1302 struct i40e_hmc_obj_txq *s)
1304 enum i40e_status_code err;
1307 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1308 I40E_HMC_LAN_TX, queue);
1312 return i40e_get_hmc_context(context_bytes,
1313 i40e_hmc_txq_ce_info, (u8 *)s);
1317 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
1318 * @hw: the hardware struct
1319 * @queue: the queue we care about
1321 enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
1324 enum i40e_status_code err;
1327 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1328 I40E_HMC_LAN_TX, queue);
1332 return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
1336 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
1337 * @hw: the hardware struct
1338 * @queue: the queue we care about
1339 * @s: the struct to be filled
1341 enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
1343 struct i40e_hmc_obj_txq *s)
1345 enum i40e_status_code err;
1348 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1349 I40E_HMC_LAN_TX, queue);
1353 return i40e_set_hmc_context(context_bytes,
1354 i40e_hmc_txq_ce_info, (u8 *)s);
1358 * i40e_get_lan_rx_queue_context - return the HMC context for the queue
1359 * @hw: the hardware struct
1360 * @queue: the queue we care about
1361 * @s: the struct to be filled
1363 enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
1365 struct i40e_hmc_obj_rxq *s)
1367 enum i40e_status_code err;
1370 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1371 I40E_HMC_LAN_RX, queue);
1375 return i40e_get_hmc_context(context_bytes,
1376 i40e_hmc_rxq_ce_info, (u8 *)s);
1380 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
1381 * @hw: the hardware struct
1382 * @queue: the queue we care about
1384 enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
1387 enum i40e_status_code err;
1390 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1391 I40E_HMC_LAN_RX, queue);
1395 return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
1399 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
1400 * @hw: the hardware struct
1401 * @queue: the queue we care about
1402 * @s: the struct to be filled
1404 enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
1406 struct i40e_hmc_obj_rxq *s)
1408 enum i40e_status_code err;
1411 err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1412 I40E_HMC_LAN_RX, queue);
1416 return i40e_set_hmc_context(context_bytes,
1417 i40e_hmc_rxq_ce_info, (u8 *)s);