/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (i40e_is_vf(hw)) {
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
                hw->aq.asq.bal  = I40E_VF_ATQBAL1;
                hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
                hw->aq.arq.bal  = I40E_VF_ARQBAL1;
                hw->aq.arq.bah  = I40E_VF_ARQBAH1;
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
                hw->aq.asq.bal  = I40E_PF_ATQBAL;
                hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
                hw->aq.arq.bal  = I40E_PF_ARQBAL;
                hw->aq.arq.bah  = I40E_PF_ARQBAH;
        }
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         i40e_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct i40e_asq_cmd_details)));
        if (ret_code) {
                i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}
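
/*
 * Sizing note (editorial illustration, not from the original source): each
 * Admin Queue descriptor is 32 bytes (sizeof(struct i40e_aq_desc)), so a
 * 32-entry send queue, for example, needs a 1 KiB DMA allocation for the
 * ring plus one i40e_asq_cmd_details tracking structure per entry.
 */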

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         i40e_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* In accordance with the Admin Queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_PF_ATQLEN_ATQENABLE_MASK));
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_VF_ATQLEN1_ATQENABLE_MASK));
        wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}
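
/*
 * Worked example (editorial illustration): the 64-bit ring base address is
 * split across two 32-bit registers.  For a descriptor ring at physical
 * address 0x0000000123456000, the writes above program
 * bah = 0x00000001 (upper dword) and bal = 0x23456000 (lower dword).
 */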

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_PF_ARQLEN_ARQENABLE_MASK));
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_VF_ARQLEN1_ARQENABLE_MASK));
        wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_asq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_asq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_arq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_arq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_asq_bufs(hw);

shutdown_asq_out:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_arq_bufs(hw);

shutdown_arq_out:
        i40e_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
        /* Registers are reset after PF reset */
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        i40e_config_asq_regs(hw);

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        i40e_config_arq_regs(hw);
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
        u16 cfg_ptr, oem_hi, oem_lo;
        u16 eetrack_lo, eetrack_hi;
        enum i40e_status_code ret_code;
        int retry = 0;

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }
        i40e_init_spinlock(&hw->aq.asq_spinlock);
        i40e_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        i40e_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = i40e_init_arq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_asq;

        /* VF has no need of firmware */
        if (i40e_is_vf(hw))
                goto init_adminq_exit;
        /* There are some cases where the firmware may not be quite ready
         * for AdminQ operations, so we retry the AdminQ setup a few times
         * if we see timeouts in this first AQ call.
         */
        do {
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &hw->aq.fw_maj_ver,
                                                        &hw->aq.fw_min_ver,
                                                        &hw->aq.fw_build,
                                                        &hw->aq.api_maj_ver,
                                                        &hw->aq.api_min_ver,
                                                        NULL);
                if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
                        break;
                retry++;
                i40e_msec_delay(100);
                i40e_resume_aq(hw);
        } while (retry < 10);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_arq;

        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
                           &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
        i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
        i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
                           &oem_hi);
        i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
                           &oem_lo);
        hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

        /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
        if ((hw->aq.api_maj_ver > 1) ||
            ((hw->aq.api_maj_ver == 1) &&
             (hw->aq.api_min_ver >= 7)))
                hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

        if (hw->mac.type == I40E_MAC_XL710 &&
            hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
                hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
        }

        /* Newer versions of firmware require lock when reading the NVM */
        if ((hw->aq.api_maj_ver > 1) ||
            ((hw->aq.api_maj_ver == 1) &&
             (hw->aq.api_min_ver >= 5)))
                hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
        }

        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        hw->nvm_release_on_done = FALSE;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

        ret_code = I40E_SUCCESS;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_arq:
        i40e_shutdown_arq(hw);
init_adminq_free_asq:
        i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}
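
/*
 * Usage sketch (editorial illustration, not part of the original file): a
 * driver attach path is expected to fill in the sizing fields listed in the
 * comment above before calling i40e_init_adminq().  The entry counts and
 * buffer sizes below are arbitrary example values; on failure the routine
 * has already unwound its own allocations, so the caller only needs to
 * report the error.
 *
 *      hw->aq.num_asq_entries = 32;
 *      hw->aq.num_arq_entries = 32;
 *      hw->aq.asq_buf_size = 512;
 *      hw->aq.arq_buf_size = 512;
 *      status = i40e_init_adminq(hw);
 *      if (status != I40E_SUCCESS)
 *              return (status);        // hypothetical caller error path
 */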

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (i40e_check_asq_alive(hw))
                i40e_aq_queue_shutdown(hw, TRUE);

        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

        if (hw->nvm_buff.va)
                i40e_free_virt_mem(hw, &hw->nvm_buff);

        return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
        struct i40e_adminq_ring *asq = &(hw->aq.asq);
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
                        i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
                                    I40E_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
                i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = I40E_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return I40E_DESC_UNUSED(asq);
}
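
/*
 * Note (editorial): I40E_DESC_UNUSED() is defined in i40e_adminq.h and
 * reports the number of free ring slots; by the usual ring convention one
 * slot is kept unused so that a completely full ring can be distinguished
 * from an empty one (head == tail).
 */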

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns TRUE if the firmware has processed all descriptors on the
 *  admin send queue. Returns FALSE if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                                struct i40e_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct i40e_asq_cmd_details *cmd_details)
{
        enum i40e_status_code status = I40E_SUCCESS;
        struct i40e_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = FALSE;
        u16  retval = 0;
        u32  val = 0;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = I40E_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                i40e_memcpy(details,
                            cmd_details,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
                }
        } else {
                i40e_memset(details, 0,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = I40E_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag\n");
                status = I40E_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (i40e_clean_asq(hw) == 0) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                i40e_memcpy(dma_buff->va, buff, buff_size,
                            I40E_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (i40e_asq_done(hw))
                                break;
                        i40e_usec_delay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
                i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                            I40E_DMA_TO_NONDMA);
                if (buff != NULL)
                        i40e_memcpy(buff, dma_buff->va, buff_size,
                                    I40E_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        i40e_debug(hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = TRUE;
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = I40E_SUCCESS;
                else
                        status = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                i40e_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}
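
/*
 * Usage sketch (editorial illustration): sending an indirect command.  The
 * opcode, buffer name, and length below are hypothetical placeholders; real
 * callers use the wrappers in i40e_common.c, which fill a descriptor via
 * i40e_fill_default_direct_cmd_desc() (below), set the BUF/RD flags for
 * commands that carry a payload, and then invoke i40e_asq_send_command().
 *
 *      struct i40e_aq_desc desc;
 *      u8 payload[EXAMPLE_LEN];        // hypothetical caller buffer
 *
 *      i40e_fill_default_direct_cmd_desc(&desc, example_opcode);
 *      desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
 *      status = i40e_asq_send_command(hw, &desc, payload,
 *                                     sizeof(payload), NULL);
 *      if (status != I40E_SUCCESS)
 *              // consult hw->aq.asq_last_status for the AQ return code
 */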

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
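
/*
 * Note (editorial illustration): a direct command attaches no buffer; up to
 * 16 bytes of parameters travel in desc.params itself.  A minimal sketch,
 * with a hypothetical opcode and value:
 *
 *      struct i40e_aq_desc desc;
 *
 *      i40e_fill_default_direct_cmd_desc(&desc, example_opcode);
 *      desc.params.internal.param0 = CPU_TO_LE32(example_value);
 *      status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 */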

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = I40E_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
        if (!i40e_is_vf(hw))
                ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
        else
                ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                    I40E_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                i40e_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, I40E_DMA_TO_NONDMA);

        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

        i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        i40e_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}
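
/*
 * Usage sketch (editorial illustration): callers typically drain the ARQ in
 * a loop until i40e_clean_arq_element() reports no more work.  The caller
 * owns e->msg_buf and must size it via e->buf_len beforehand.
 *
 *      struct i40e_arq_event_info event;
 *      u16 pending = 0;
 *
 *      event.buf_len = hw->aq.arq_buf_size;
 *      event.msg_buf = ...;    // caller-allocated, event.buf_len bytes
 *      do {
 *              if (i40e_clean_arq_element(hw, &event, &pending))
 *                      break;  // includes I40E_ERR_ADMIN_QUEUE_NO_WORK
 *              // dispatch on LE16_TO_CPU(event.desc.opcode)
 *      } while (pending);
 */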