/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#ifndef I40E_NO_TYPE_HEADER
#include "i40e_type.h"
#endif

/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
                                              u32 sd_index,
                                              enum i40e_sd_entry_type type,
                                              u64 direct_mode_sz)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        struct i40e_hmc_sd_entry *sd_entry;
        enum   i40e_memory_type mem_type;
        bool dma_mem_alloc_done = FALSE;
        struct i40e_dma_mem mem;
        u64 alloc_len;

        if (NULL == hmc_info->sd_table.sd_entry) {
                ret_code = I40E_ERR_BAD_PTR;
                DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
                goto exit;
        }

        if (sd_index >= hmc_info->sd_table.sd_cnt) {
                ret_code = I40E_ERR_INVALID_SD_INDEX;
                DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
                goto exit;
        }

        sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
        if (!sd_entry->valid) {
                if (I40E_SD_TYPE_PAGED == type) {
                        mem_type = i40e_mem_pd;
                        alloc_len = I40E_HMC_PAGED_BP_SIZE;
                } else {
                        mem_type = i40e_mem_bp_jumbo;
                        alloc_len = direct_mode_sz;
                }

                /* allocate a 4K pd page or 2M backing page */
                ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
                                                 I40E_HMC_PD_BP_BUF_ALIGNMENT);
                if (ret_code)
                        goto exit;
                dma_mem_alloc_done = TRUE;
                if (I40E_SD_TYPE_PAGED == type) {
                        ret_code = i40e_allocate_virt_mem(hw,
                                        &sd_entry->u.pd_table.pd_entry_virt_mem,
                                        sizeof(struct i40e_hmc_pd_entry) * 512);
                        if (ret_code)
                                goto exit;
                        sd_entry->u.pd_table.pd_entry =
                                (struct i40e_hmc_pd_entry *)
                                sd_entry->u.pd_table.pd_entry_virt_mem.va;
                        i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
                                    &mem, sizeof(struct i40e_dma_mem),
                                    I40E_NONDMA_TO_NONDMA);
                } else {
                        i40e_memcpy(&sd_entry->u.bp.addr,
                                    &mem, sizeof(struct i40e_dma_mem),
                                    I40E_NONDMA_TO_NONDMA);
                        sd_entry->u.bp.sd_pd_index = sd_index;
                }
                /* initialize the sd entry */
                hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

                /* increment the ref count */
                I40E_INC_SD_REFCNT(&hmc_info->sd_table);
        }
        /* Increment backing page reference count */
        if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
                I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
        if (I40E_SUCCESS != ret_code)
                if (dma_mem_alloc_done)
                        i40e_free_dma_mem(hw, &mem);

        return ret_code;
}

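/*
 * Illustrative sketch, not part of the driver: how a caller might request a
 * direct-mode (2M backing page) segment descriptor with the routine above.
 * The function name is hypothetical; the types and the I40E_SD_TYPE_DIRECT /
 * I40E_HMC_DIRECT_BP_SIZE constants come from i40e_hmc.h, which is included
 * above.  Compiled out on purpose.
 */
#if 0
static enum i40e_status_code
example_add_direct_sd(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
                      u32 sd_index)
{
        enum i40e_status_code ret;

        /* allocate (or re-reference) a 2M direct-mode backing page */
        ret = i40e_add_sd_table_entry(hw, hmc_info, sd_index,
                                      I40E_SD_TYPE_DIRECT,
                                      I40E_HMC_DIRECT_BP_SIZE);
        if (ret != I40E_SUCCESS)
                DEBUGOUT("example_add_direct_sd: add failed\n");
        return ret;
}
#endif
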
/**
 * i40e_add_pd_table_entry - Adds a page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use this preallocated page instead of allocating a new one
 *
 * This function:
 *      1. Initializes the pd entry
 *      2. Adds the pd_entry to the pd_table
 *      3. Marks the entry valid in the i40e_hmc_pd_entry structure
 *      4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *      1. The memory for the pd should be pinned down, physically contiguous,
 *         aligned on a 4K boundary and zeroed.
 *      2. It should be 4K in size.
 **/
enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
                                              u32 pd_index,
                                              struct i40e_dma_mem *rsrc_pg)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        struct i40e_hmc_pd_table *pd_table;
        struct i40e_hmc_pd_entry *pd_entry;
        struct i40e_dma_mem mem;
        struct i40e_dma_mem *page = &mem;
        u32 sd_idx, rel_pd_idx;
        u64 *pd_addr;
        u64 page_desc;

        if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
                ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
                DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
                goto exit;
        }

        /* find corresponding sd */
        sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
        if (I40E_SD_TYPE_PAGED !=
            hmc_info->sd_table.sd_entry[sd_idx].entry_type)
                goto exit;

        rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
        pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
        pd_entry = &pd_table->pd_entry[rel_pd_idx];
        if (!pd_entry->valid) {
                if (rsrc_pg) {
                        pd_entry->rsrc_pg = TRUE;
                        page = rsrc_pg;
                } else {
                        /* allocate a 4K backing page */
                        ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
                                                I40E_HMC_PAGED_BP_SIZE,
                                                I40E_HMC_PD_BP_BUF_ALIGNMENT);
                        if (ret_code)
                                goto exit;
                        pd_entry->rsrc_pg = FALSE;
                }

                i40e_memcpy(&pd_entry->bp.addr, page,
                            sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
                pd_entry->bp.sd_pd_index = pd_index;
                pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
                /* Set page address and valid bit */
                page_desc = page->pa | 0x1;

                pd_addr = (u64 *)pd_table->pd_page_addr.va;
                pd_addr += rel_pd_idx;

                /* Add the backing page physical address in the pd entry */
                i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
                            I40E_NONDMA_TO_DMA);

                pd_entry->sd_index = sd_idx;
                pd_entry->valid = TRUE;
                I40E_INC_PD_REFCNT(pd_table);
        }
        I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
        return ret_code;
}

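/*
 * Illustrative sketch, not part of the driver: pairing the two add routines
 * above for paged address mode.  A paged segment descriptor is created first,
 * then each of its page descriptors is backed by a driver-allocated 4K page
 * (rsrc_pg == NULL).  The function name and loop bound are hypothetical; the
 * constants are the ones already used in this file.  Compiled out on purpose.
 */
#if 0
static enum i40e_status_code
example_populate_paged_sd(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
                          u32 sd_index, u32 pd_count)
{
        enum i40e_status_code ret;
        u32 i;

        /* create the paged SD; direct_mode_sz is ignored for paged type */
        ret = i40e_add_sd_table_entry(hw, hmc_info, sd_index,
                                      I40E_SD_TYPE_PAGED,
                                      I40E_HMC_PAGED_BP_SIZE);
        if (ret != I40E_SUCCESS)
                return ret;

        for (i = 0; i < pd_count && i < I40E_HMC_PD_CNT_IN_SD; i++) {
                /* pd_index is global across the whole pd space */
                ret = i40e_add_pd_table_entry(hw, hmc_info,
                                              sd_index * I40E_HMC_PD_CNT_IN_SD + i,
                                              NULL);
                if (ret != I40E_SUCCESS)
                        break;
        }
        return ret;
}
#endif
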
/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *      1. Marks the entry in the pd table (for paged address mode) or in the
 *         sd table (for direct address mode) invalid.
 *      2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *      3. Decrements the ref count for the pd_entry
 * assumptions:
 *      1. Caller can deallocate the memory used by backing storage after this
 *         function returns.
 **/
enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
                                        struct i40e_hmc_info *hmc_info,
                                        u32 idx)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        struct i40e_hmc_pd_entry *pd_entry;
        struct i40e_hmc_pd_table *pd_table;
        struct i40e_hmc_sd_entry *sd_entry;
        u32 sd_idx, rel_pd_idx;
        u64 *pd_addr;

        /* calculate index */
        sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
        rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
        if (sd_idx >= hmc_info->sd_table.sd_cnt) {
                ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
                DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
                goto exit;
        }
        sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
        if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
                ret_code = I40E_ERR_INVALID_SD_TYPE;
                DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
                goto exit;
        }
        /* get the entry and decrease its ref counter */
        pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
        pd_entry = &pd_table->pd_entry[rel_pd_idx];
        I40E_DEC_BP_REFCNT(&pd_entry->bp);
        if (pd_entry->bp.ref_cnt)
                goto exit;

        /* mark the entry invalid */
        pd_entry->valid = FALSE;
        I40E_DEC_PD_REFCNT(pd_table);
        pd_addr = (u64 *)pd_table->pd_page_addr.va;
        pd_addr += rel_pd_idx;
        i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
        I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

        /* free memory here */
        if (!pd_entry->rsrc_pg)
                ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
        if (I40E_SUCCESS != ret_code)
                goto exit;
        if (!pd_table->ref_cnt)
                i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
        return ret_code;
}

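/*
 * Illustrative teardown sketch, not part of the driver: undoing the paged
 * setup sketched earlier.  Backing pages are released first; once the
 * pd_table reference count drops to zero, the pd page itself can be removed.
 * The function name is hypothetical; only routines declared in i40e_hmc.h
 * and defined in this file are used.  Compiled out on purpose.
 */
#if 0
static void
example_teardown_paged_sd(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
                          u32 sd_index, u32 pd_count)
{
        u32 i;

        /* drop each backing page under this paged SD */
        for (i = 0; i < pd_count && i < I40E_HMC_PD_CNT_IN_SD; i++)
                i40e_remove_pd_bp(hw, hmc_info,
                                  sd_index * I40E_HMC_PD_CNT_IN_SD + i);

        /* the pd page is freed only when no page descriptors reference it */
        if (i40e_prep_remove_pd_page(hmc_info, sd_index) == I40E_SUCCESS)
                i40e_remove_pd_page_new(hw, hmc_info, sd_index, TRUE);
}
#endif
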
/**
 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 **/
enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
                                             u32 idx)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        struct i40e_hmc_sd_entry *sd_entry;

        /* get the entry and decrease its ref counter */
        sd_entry = &hmc_info->sd_table.sd_entry[idx];
        I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
        if (sd_entry->u.bp.ref_cnt) {
                ret_code = I40E_ERR_NOT_READY;
                goto exit;
        }
        I40E_DEC_SD_REFCNT(&hmc_info->sd_table);

        /* mark the entry invalid */
        sd_entry->valid = FALSE;
exit:
        return ret_code;
}

/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
                                            struct i40e_hmc_info *hmc_info,
                                            u32 idx, bool is_pf)
{
        struct i40e_hmc_sd_entry *sd_entry;

        if (!is_pf)
                return I40E_NOT_SUPPORTED;

        /* get the entry and decrease its ref counter */
        sd_entry = &hmc_info->sd_table.sd_entry[idx];
        I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);

        return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
}

/**
 * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 **/
enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
                                               u32 idx)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        struct i40e_hmc_sd_entry *sd_entry;

        sd_entry = &hmc_info->sd_table.sd_entry[idx];

        if (sd_entry->u.pd_table.ref_cnt) {
                ret_code = I40E_ERR_NOT_READY;
                goto exit;
        }

        /* mark the entry invalid */
        sd_entry->valid = FALSE;

        I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
exit:
        return ret_code;
}

/**
 * i40e_remove_pd_page_new - Removes a PD page from sd entry.
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
                                              u32 idx, bool is_pf)
{
        struct i40e_hmc_sd_entry *sd_entry;

        if (!is_pf)
                return I40E_NOT_SUPPORTED;

        sd_entry = &hmc_info->sd_table.sd_entry[idx];
        I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);

        return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
}
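
/*
 * Illustrative sketch, not part of the driver: the direct-mode counterpart
 * of the paged teardown sketched earlier.  i40e_prep_remove_sd_bp() drops the
 * backing-page reference and only reports I40E_SUCCESS when the last
 * reference is gone, at which point the 2M page may be cleared and freed.
 * The function name is hypothetical.  Compiled out on purpose.
 */
#if 0
static void
example_remove_direct_sd(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
                         u32 sd_index)
{
        if (i40e_prep_remove_sd_bp(hmc_info, sd_index) == I40E_SUCCESS)
                i40e_remove_sd_bp_new(hw, hmc_info, sd_index, TRUE);
}
#endif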