/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_hw.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore_status.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "reg_addr.h"
#include "ecore_utils.h"
#include "ecore_iov_api.h"

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#pragma warning(disable : 28121)
#endif

#ifndef ASIC_ONLY
#define ECORE_EMUL_FACTOR 2000
#define ECORE_FPGA_FACTOR 200
#endif

#define ECORE_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define ECORE_BAR_INVALID_OFFSET        (OSAL_CPU_TO_LE32(-1))

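/*
 * A PTT (PF translation table entry) pairs a fixed window in the PF's
 * external BAR with a programmable offset into the device's internal (GRC)
 * address space.  Re-programming the offset (see ecore_ptt_set_win() below)
 * slides the window, so a small BAR can reach any internal register.
 */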
struct ecore_ptt {
        osal_list_entry_t       list_entry;
        unsigned int            idx;
        struct pxp_ptt_entry    pxp;
        u8                      hwfn_id;
};

struct ecore_ptt_pool {
        osal_list_t             free_list;
        osal_spinlock_t         lock; /* ptt synchronized access */
        struct ecore_ptt        ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

static void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
        p_hwfn->p_ptt_pool = OSAL_NULL;
}

enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
                                                   GFP_KERNEL,
                                                   sizeof(*p_pool));
        int i;

        if (!p_pool)
                return ECORE_NOMEM;

        OSAL_LIST_INIT(&p_pool->free_list);
        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_pool->ptts[i].idx = i;
                p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
                p_pool->ptts[i].pxp.pretend.control = 0;
                p_pool->ptts[i].hwfn_id = p_hwfn->my_id;

                /* There are special PTT entries that are taken only by design.
                 * The rest are added to the list for general usage.
                 */
                if (i >= RESERVED_PTT_MAX)
                        OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
                                            &p_pool->free_list);
        }

        p_hwfn->p_ptt_pool = p_pool;
#ifdef CONFIG_ECORE_LOCK_ALLOC
        if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
                __ecore_ptt_pool_free(p_hwfn);
                return ECORE_NOMEM;
        }
#endif
        OSAL_SPIN_LOCK_INIT(&p_pool->lock);
        return ECORE_SUCCESS;
}

void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ptt *p_ptt;
        int i;

        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
                p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
        }
}

void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
#ifdef CONFIG_ECORE_LOCK_ALLOC
        if (p_hwfn->p_ptt_pool)
                OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
#endif
        __ecore_ptt_pool_free(p_hwfn);
}

struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ptt *p_ptt;
        unsigned int i;

        /* Take a free PTT from the list */
        for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
                OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);

                if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
                        p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
                                                      struct ecore_ptt, list_entry);
                        OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
                                               &p_hwfn->p_ptt_pool->free_list);

                        OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);

                        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                                   "allocated ptt %d\n", p_ptt->idx);

                        return p_ptt;
                }

                OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
                OSAL_MSLEEP(1);
        }

        DP_NOTICE(p_hwfn, true, "PTT acquire timeout - failed to allocate PTT\n");
        return OSAL_NULL;
}

void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
                       struct ecore_ptt *p_ptt)
{
        /* This PTT should not be set to pretend if it is being released.
         * TODO - add sanity checks to verify that pretend isn't set on
         * this ptt.
         */

        OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
        OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
        OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
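
/*
 * Illustrative sketch (not compiled): the typical acquire/use/release
 * pattern for a PTT window.  The register offset and the choice of
 * ECORE_AGAIN on exhaustion are hypothetical; the essential point is that
 * every acquired PTT must be released back to the free list.
 */
#if 0
static enum _ecore_status_t example_read_reg(struct ecore_hwfn *p_hwfn,
                                             u32 reg_offset, u32 *p_val)
{
        struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);

        if (p_ptt == OSAL_NULL)
                return ECORE_AGAIN; /* pool exhausted - retry later */

        *p_val = ecore_rd(p_hwfn, p_ptt, reg_offset);
        ecore_ptt_release(p_hwfn, p_ptt);

        return ECORE_SUCCESS;
}
#endif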

static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
{
        /* The HW is using DWORDS and we need to translate it to Bytes */
        return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
}

static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
{
        return PXP_PF_WINDOW_ADMIN_PER_PF_START +
               p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
{
        return PXP_EXTERNAL_BAR_PF_WINDOW_START +
               p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
                       struct ecore_ptt *p_ptt,
                       u32 new_hw_addr)
{
        u32 prev_hw_addr;

        prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);

        if (new_hw_addr == prev_hw_addr)
                return;

        /* Update the PTT entry in the admin window */
        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "Updating PTT entry %d to offset 0x%x\n",
                   p_ptt->idx, new_hw_addr);

        /* The HW is using DWORDS and the address is in Bytes */
        p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);

        REG_WR(p_hwfn,
               ecore_ptt_config_addr(p_ptt) +
               OFFSETOF(struct pxp_ptt_entry, offset),
               OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
}

static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
                         struct ecore_ptt *p_ptt,
                         u32 hw_addr)
{
        u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
        u32 offset;

        offset = hw_addr - win_hw_addr;

        if (p_ptt->hwfn_id != p_hwfn->my_id)
                DP_NOTICE(p_hwfn, true,
                          "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
                          p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);

        /* Verify that the address is within the current window */
        if (hw_addr < win_hw_addr ||
            offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
                ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
                offset = 0;
        }

        return ecore_ptt_get_bar_addr(p_ptt) + offset;
}
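
/*
 * Example of the window logic above: if the window currently starts at
 * internal address 0x10000 and the caller asks for 0x10004, the existing
 * mapping is reused and the returned BAR address is window_base + 4.
 * Asking for an address below the window start, or beyond its
 * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE span, re-programs the window to
 * start exactly at the requested address.
 */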

struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
                                         enum reserved_ptts ptt_idx)
{
        if (ptt_idx >= RESERVED_PTT_MAX) {
                DP_NOTICE(p_hwfn, true,
                          "Requested PTT %d is out of range\n", ptt_idx);
                return OSAL_NULL;
        }

        return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt)
{
        bool is_empty = true;
        u32 bar_addr;

        if (!p_hwfn->p_dev->chk_reg_fifo)
                goto out;

        /* ecore_rd() cannot be used here since it calls this function */
        bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
        is_empty = REG_RD(p_hwfn, bar_addr) == 0;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                OSAL_UDELAY(100);
#endif

out:
        return is_empty;
}

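/*
 * ecore_wr()/ecore_rd() below sample the reg_fifo state both before and
 * after the access.  A warning is raised only when the FIFO was empty
 * before but not after, so the error can be attributed to this specific
 * access rather than to an earlier one.
 */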
void ecore_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr,
              u32 val)
{
        bool prev_fifo_err;
        u32 bar_addr;

        prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

        bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
        REG_WR(p_hwfn, bar_addr, val);
        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                OSAL_UDELAY(100);
#endif

        OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
                  "reg_fifo error was caused by a call to ecore_wr(0x%x, 0x%x)\n",
                  hw_addr, val);
}

u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
{
        bool prev_fifo_err;
        u32 bar_addr, val;

        prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

        bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
        val = REG_RD(p_hwfn, bar_addr);

        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                OSAL_UDELAY(100);
#endif

        OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
                  "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
                  hw_addr);

        return val;
}

static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt,
                            void *addr,
                            u32 hw_addr,
                            osal_size_t n,
                            bool to_device)
{
        u32 dw_count, *host_addr, hw_offset;
        osal_size_t quota, done = 0;
        u32 OSAL_IOMEM *reg_addr;

        while (done < n) {
                quota = OSAL_MIN_T(osal_size_t, n - done,
                                   PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

                if (IS_PF(p_hwfn->p_dev)) {
                        ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
                        hw_offset = ecore_ptt_get_bar_addr(p_ptt);
                } else {
                        hw_offset = hw_addr + done;
                }

                dw_count = quota / 4;
                host_addr = (u32 *)((u8 *)addr + done);
                reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);

                if (to_device)
                        while (dw_count--)
                                DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
                else
                        while (dw_count--)
                                *host_addr++ = DIRECT_REG_RD(p_hwfn,
                                                             reg_addr++);

                done += quota;
        }
}

void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
                       struct ecore_ptt *p_ptt,
                       void *dest, u32 hw_addr, osal_size_t n)
{
        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "hw_addr 0x%x, dest %p, size %lu\n",
                   hw_addr, dest, (unsigned long)n);

        ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
                     struct ecore_ptt *p_ptt,
                     u32 hw_addr, void *src, osal_size_t n)
{
        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "hw_addr 0x%x, src %p, size %lu\n",
                   hw_addr, src, (unsigned long)n);

        ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}
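
/*
 * Illustrative sketch (not compiled): copying a host buffer into internal
 * device memory through the PTT window and reading it back.  The buffer
 * and destination offset are hypothetical; sizes are in bytes and should
 * be DWORD-aligned since the copy loop above works in 32-bit units.
 */
#if 0
static void example_copy_to_device(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt)
{
        u32 buf[16] = { 0 };            /* host-side staging buffer */
        u32 dev_offset = 0x50000;       /* hypothetical internal address */

        ecore_memcpy_to(p_hwfn, p_ptt, dev_offset, buf, sizeof(buf));
        ecore_memcpy_from(p_hwfn, p_ptt, buf, dev_offset, sizeof(buf));
}
#endif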

void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
                       struct ecore_ptt *p_ptt, u16 fid)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

        /* Every pretend undoes previous pretends, including
         * previous port pretend.
         */
        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
                fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

        p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
        p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);

        REG_WR(p_hwfn,
               ecore_ptt_config_addr(p_ptt) +
               OFFSETOF(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt, u8 port_id)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
        p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

        REG_WR(p_hwfn,
               ecore_ptt_config_addr(p_ptt) +
               OFFSETOF(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
                          struct ecore_ptt *p_ptt)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

        REG_WR(p_hwfn,
               ecore_ptt_config_addr(p_ptt) +
               OFFSETOF(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
{
        u32 concrete_fid = 0;

        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

        return concrete_fid;
}
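
/*
 * Worked example (assuming the usual HSI field layout, with PFID in the
 * low nibble, VFVALID at bit 7 and VFID at bits 15:8): for rel_pf_id 2 and
 * vfid 5 the concrete FID is (5 << 8) | (1 << 7) | 2 = 0x0582.
 */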

#if 0
/* Ecore HW lock
 * =============
 * Although the implementation is ready, today we don't have any flow that
 * utilizes said locks - and we want to keep it this way.
 * If this changes, this needs to be revisited.
 */
#define HW_LOCK_MAX_RETRIES 1000
enum _ecore_status_t ecore_hw_lock(struct ecore_hwfn            *p_hwfn,
                                   struct ecore_ptt             *p_ptt,
                                   u8                           resource,
                                   bool                         block)
{
        u32 cnt, lock_status, hw_lock_cntr_reg;
        enum _ecore_status_t ecore_status;

        /* Locate the proper lock register for this function.
         * Note: this code assumes all the H/W lock registers are sequential
         * in memory.
         */
        hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
                           p_hwfn->rel_pf_id *
                           MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);

        /* Validate that the resource is not already taken */
        lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

        if (lock_status & resource) {
                DP_NOTICE(p_hwfn, true,
                          "Resource already locked: lock_status=0x%x resource=0x%x\n",
                          lock_status, resource);

                return ECORE_BUSY;
        }

        /* Register for the lock */
        ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg + sizeof(u32), resource);

        /* Try for 5 seconds every 5ms */
        for (cnt = 0; cnt < HW_LOCK_MAX_RETRIES; cnt++) {
                lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

                if (lock_status & resource)
                        return ECORE_SUCCESS;

                if (!block) {
                        ecore_status = ECORE_BUSY;
                        break;
                }

                OSAL_MSLEEP(5);
        }

        if (cnt == HW_LOCK_MAX_RETRIES) {
                DP_NOTICE(p_hwfn, true, "Lock timeout resource=0x%x\n",
                          resource);
                ecore_status = ECORE_TIMEOUT;
        }

        /* Clear the pending request */
        ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);

        return ecore_status;
}

enum _ecore_status_t ecore_hw_unlock(struct ecore_hwfn          *p_hwfn,
                                     struct ecore_ptt           *p_ptt,
                                     u8                         resource)
{
        u32 lock_status, hw_lock_cntr_reg;

        /* Locate the proper lock register for this function.
         * Note: this code assumes all the H/W lock registers are sequential
         * in memory.
         */
        hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
                           p_hwfn->rel_pf_id *
                           MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);

        /* Validate that the resource is currently taken */
        lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

        if (!(lock_status & resource)) {
                DP_NOTICE(p_hwfn, true,
                          "resource 0x%x was not taken (lock status 0x%x)\n",
                          resource, lock_status);

                return ECORE_NODEV;
        }

        /* clear lock for resource */
        ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);
        return ECORE_SUCCESS;
}
#endif /* HW locks logic */

/* DMAE */

#define ECORE_DMAE_FLAGS_IS_SET(params, flag)   \
        ((params) != OSAL_NULL && ((params)->flags & ECORE_DMAE_FLAG_##flag))

static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
                              const u8  is_src_type_grc,
                              const u8  is_dst_type_grc,
                              struct ecore_dmae_params *p_params)
{
        u8 src_pfid, dst_pfid, port_id;
        u16 opcode_b = 0;
        u32 opcode = 0;

        /* Whether the source is the PCIe or the GRC.
         * 0- The source is the PCIe
         * 1- The source is the GRC.
         */
        opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
                                   : DMAE_CMD_SRC_MASK_PCIE) <<
                  DMAE_CMD_SRC_SHIFT;
        src_pfid = ECORE_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
                   p_params->src_pfid : p_hwfn->rel_pf_id;
        opcode |= (src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
                  DMAE_CMD_SRC_PF_ID_SHIFT;

        /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
        opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
                                   : DMAE_CMD_DST_MASK_PCIE) <<
                  DMAE_CMD_DST_SHIFT;
        dst_pfid = ECORE_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
                   p_params->dst_pfid : p_hwfn->rel_pf_id;
        opcode |= (dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
                  DMAE_CMD_DST_PF_ID_SHIFT;

        /* DMAE_E4_TODO need to check which value to specify here. */
        /* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT;*/

        /* Whether to write a completion word to the completion destination:
         * 0-Do not write a completion word
         * 1-Write the completion word
         */
        opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
        opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK <<
                  DMAE_CMD_SRC_ADDR_RESET_SHIFT;

        if (ECORE_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
                opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;

        /* Swapping mode 3 - big endian.  There should be an ifdef'd define
         * for this in the HSI somewhere; for now the value is hard-coded
         * here.
         */
        opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;

        port_id = (ECORE_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
                  p_params->port_id : p_hwfn->port_id;
        opcode |= port_id << DMAE_CMD_PORT_ID_SHIFT;

        /* reset source address in next go (already set above; harmless) */
        opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK <<
                  DMAE_CMD_SRC_ADDR_RESET_SHIFT;

        /* reset dest address in next go */
        opcode |= DMAE_CMD_DST_ADDR_RESET_MASK <<
                  DMAE_CMD_DST_ADDR_RESET_SHIFT;

        /* SRC/DST VFID: all 1's - pf, otherwise VF id */
        if (ECORE_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
                opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
                opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
        } else {
                opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
                             DMAE_CMD_SRC_VF_ID_SHIFT);
        }
        if (ECORE_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
                opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
                opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
        } else {
                opcode_b |= DMAE_CMD_DST_VF_ID_MASK <<
                            DMAE_CMD_DST_VF_ID_SHIFT;
        }

        p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
        p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
}

static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
{
        OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) !=
                          31 * 4);

        /* All the DMAE 'go' registers form an array in internal memory */
        return DMAE_REG_GO_C0 + (idx << 2);
}
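
/* For example, channel idx 3 maps to DMAE_REG_GO_C0 + 12, i.e. the GO
 * register of command channel 3.
 */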

static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
                                                    struct ecore_ptt *p_ptt)
{
        struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
        u8 idx_cmd = p_hwfn->dmae_info.channel, i;
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;

        /* verify that the source/destination addresses are non-zero */
        if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
             ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
                DP_NOTICE(p_hwfn, true,
                          "source or destination address 0 idx_cmd=%d\n"
                          "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                          idx_cmd,
                          OSAL_LE32_TO_CPU(p_command->opcode),
                          OSAL_LE16_TO_CPU(p_command->opcode_b),
                          OSAL_LE16_TO_CPU(p_command->length_dw),
                          OSAL_LE32_TO_CPU(p_command->src_addr_hi),
                          OSAL_LE32_TO_CPU(p_command->src_addr_lo),
                          OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
                          OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                   idx_cmd,
                   OSAL_LE32_TO_CPU(p_command->opcode),
                   OSAL_LE16_TO_CPU(p_command->opcode_b),
                   OSAL_LE16_TO_CPU(p_command->length_dw),
                   OSAL_LE32_TO_CPU(p_command->src_addr_hi),
                   OSAL_LE32_TO_CPU(p_command->src_addr_lo),
                   OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
                   OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

        /* Copy the command to DMAE - need to do it before every call
         * (source/dest addresses are not reset).
         * The number of commands has been increased to 16 (previously 14).
         * The first 9 DWs are the command registers, the 10th DW is the
         * GO register, and the rest are result registers (which are read
         * only by the client).
         */
        for (i = 0; i < DMAE_CMD_SIZE; i++) {
                u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
                            *(((u32 *)p_command) + i) : 0;

                ecore_wr(p_hwfn, p_ptt,
                         DMAE_REG_CMD_MEM +
                         (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
                         (i * sizeof(u32)), data);
        }

        ecore_wr(p_hwfn, p_ptt,
                 ecore_dmae_idx_to_go_cmd(idx_cmd),
                 DMAE_GO_VALUE);

        return ecore_status;
}

enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
{
        dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
        struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
        u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
        u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

        *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
        if (*p_comp == OSAL_NULL) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `p_completion_word'\n");
                goto err;
        }

        p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
        *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
                                         sizeof(struct dmae_cmd));
        if (*p_cmd == OSAL_NULL) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `struct dmae_cmd'\n");
                goto err;
        }

        p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
                                          sizeof(u32) * DMAE_MAX_RW_SIZE);
        if (*p_buff == OSAL_NULL) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `intermediate_buffer'\n");
                goto err;
        }

        p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
        p_hwfn->dmae_info.b_mem_ready = true;

        return ECORE_SUCCESS;
err:
        ecore_dmae_info_free(p_hwfn);
        return ECORE_NOMEM;
}

void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
        dma_addr_t p_phys;

        OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
        p_hwfn->dmae_info.b_mem_ready = false;
        OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

        if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
                p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_hwfn->dmae_info.p_completion_word,
                                       p_phys, sizeof(u32));
                p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
        }

        if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
                p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_hwfn->dmae_info.p_dmae_cmd,
                                       p_phys, sizeof(struct dmae_cmd));
                p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
        }

        if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
                p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_hwfn->dmae_info.p_intermediate_buffer,
                                       p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
                p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
        }
}

static enum _ecore_status_t
ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
{
        u32 wait_cnt_limit = 10000, wait_cnt = 0;
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
                      ECORE_EMUL_FACTOR :
                      (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
                       ECORE_FPGA_FACTOR : 1));

        wait_cnt_limit *= factor;
#endif

        /* DMAE_E4_TODO : TODO check if we have to call any other function
         * other than BARRIER to sync the completion_word, since we are not
         * using the volatile keyword for it
         */
        OSAL_BARRIER(p_hwfn->p_dev);
        while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
                OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
                if (++wait_cnt > wait_cnt_limit) {
                        DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
                                  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
                                  *(p_hwfn->dmae_info.p_completion_word),
                                  DMAE_COMPLETION_VAL);
                        ecore_status = ECORE_TIMEOUT;
                        break;
                }

                /* to sync the completion_word since we are not
                 * using the volatile keyword for p_completion_word
                 */
                OSAL_BARRIER(p_hwfn->p_dev);
        }

        if (ecore_status == ECORE_SUCCESS)
                *p_hwfn->dmae_info.p_completion_word = 0;

        return ecore_status;
}

static enum _ecore_status_t ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
                                                             struct ecore_ptt *p_ptt,
                                                             u64 src_addr,
                                                             u64 dst_addr,
                                                             u8 src_type,
                                                             u8 dst_type,
                                                             u32 length_dw)
{
        dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;

        switch (src_type) {
        case ECORE_DMAE_ADDRESS_GRC:
        case ECORE_DMAE_ADDRESS_HOST_PHYS:
                cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
                cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
                break;
        /* for virtual source addresses we use the intermediate buffer. */
        case ECORE_DMAE_ADDRESS_HOST_VIRT:
                cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
                cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
                OSAL_MEMCPY(&(p_hwfn->dmae_info.p_intermediate_buffer[0]),
                            (void *)(osal_uintptr_t)src_addr,
                            length_dw * sizeof(u32));
                break;
        default:
                return ECORE_INVAL;
        }

        switch (dst_type) {
        case ECORE_DMAE_ADDRESS_GRC:
        case ECORE_DMAE_ADDRESS_HOST_PHYS:
                cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
                cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
                break;
        /* for virtual destination addresses we use the intermediate buffer. */
        case ECORE_DMAE_ADDRESS_HOST_VIRT:
                cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
                cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
                break;
        default:
                return ECORE_INVAL;
        }

        cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
#ifndef __EXTRACT__LINUX__
        if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
            src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
                OSAL_DMA_SYNC(p_hwfn->p_dev,
                              (void *)HILO_U64(cmd->src_addr_hi,
                                               cmd->src_addr_lo),
                              length_dw * sizeof(u32), false);
#endif

        ecore_dmae_post_command(p_hwfn, p_ptt);

        ecore_status = ecore_dmae_operation_wait(p_hwfn);

#ifndef __EXTRACT__LINUX__
        /* TODO - is it true ? */
        if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
            src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
                OSAL_DMA_SYNC(p_hwfn->p_dev,
                              (void *)HILO_U64(cmd->src_addr_hi,
                                               cmd->src_addr_lo),
                              length_dw * sizeof(u32), true);
#endif

        if (ecore_status != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, ECORE_MSG_HW,
                          "Wait failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x, intermediate buffer 0x%llx.\n",
                          (unsigned long long)src_addr, (unsigned long long)dst_addr, length_dw,
                          (unsigned long long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
                return ecore_status;
        }

        if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
                OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
                            &p_hwfn->dmae_info.p_intermediate_buffer[0],
                            length_dw * sizeof(u32));

        return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
                                                       struct ecore_ptt *p_ptt,
                                                       u64 src_addr, u64 dst_addr,
                                                       u8 src_type, u8 dst_type,
                                                       u32 size_in_dwords,
                                                       struct ecore_dmae_params *p_params)
{
        dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
        u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        u64 src_addr_split = 0, dst_addr_split = 0;
        u16 length_limit = DMAE_MAX_RW_SIZE;
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;
        u32 offset = 0;

        if (!p_hwfn->dmae_info.b_mem_ready) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                           "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
                           (unsigned long long)src_addr, src_type, (unsigned long long)dst_addr, dst_type,
                           size_in_dwords);
                return ECORE_NOMEM;
        }

        if (p_hwfn->p_dev->recov_in_prog) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                           "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
                           (unsigned long long)src_addr, src_type, (unsigned long long)dst_addr, dst_type,
                           size_in_dwords);
                /* Return success to let the flow complete successfully
                 * w/o any error handling.
                 */
                return ECORE_SUCCESS;
        }

        if (!cmd) {
                DP_NOTICE(p_hwfn, true,
                          "ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
                          (unsigned long long)src_addr, (unsigned long long)dst_addr, length_cur);
                return ECORE_INVAL;
        }

        ecore_dmae_opcode(p_hwfn,
                          (src_type == ECORE_DMAE_ADDRESS_GRC),
                          (dst_type == ECORE_DMAE_ADDRESS_GRC),
                          p_params);

        cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
        cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
        cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);

        /* Check if the grc_addr is valid (i.e. < MAX_GRC_OFFSET).
         * The transaction is split into chunks of at most DMAE_MAX_RW_SIZE
         * dwords each; GRC offsets advance in dwords, host addresses in
         * bytes (hence the offset * 4 below).
         */
        cnt_split = size_in_dwords / length_limit;
        length_mod = size_in_dwords % length_limit;

        src_addr_split = src_addr;
        dst_addr_split = dst_addr;

        for (i = 0; i <= cnt_split; i++) {
                offset = length_limit * i;

                if (!ECORE_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
                        if (src_type == ECORE_DMAE_ADDRESS_GRC)
                                src_addr_split = src_addr + offset;
                        else
                                src_addr_split = src_addr + (offset * 4);
                }

                if (dst_type == ECORE_DMAE_ADDRESS_GRC)
                        dst_addr_split = dst_addr + offset;
                else
                        dst_addr_split = dst_addr + (offset * 4);

                length_cur = (cnt_split == i) ? length_mod : length_limit;

                /* might be zero on last iteration */
                if (!length_cur)
                        continue;

                ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
                                                                p_ptt,
                                                                src_addr_split,
                                                                dst_addr_split,
                                                                src_type,
                                                                dst_type,
                                                                length_cur);
                if (ecore_status != ECORE_SUCCESS) {
                        DP_NOTICE(p_hwfn, false,
                                  "ecore_dmae_execute_sub_operation failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
                                  ecore_status, (unsigned long long)src_addr, (unsigned long long)dst_addr, length_cur);

                        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
                        break;
                }
        }

        return ecore_status;
}

enum _ecore_status_t ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         u64 source_addr,
                                         u32 grc_addr,
                                         u32 size_in_dwords,
                                         struct ecore_dmae_params *p_params)
{
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        enum _ecore_status_t rc;

        OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);

        rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
                                        grc_addr_in_dw,
                                        ECORE_DMAE_ADDRESS_HOST_VIRT,
                                        ECORE_DMAE_ADDRESS_GRC,
                                        size_in_dwords, p_params);

        OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

        return rc;
}
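
/*
 * Illustrative sketch (not compiled): DMA-ing a host-resident table into
 * internal device memory.  The table and GRC address are hypothetical;
 * note that grc_addr is given in bytes while the size is in dwords.
 */
#if 0
static enum _ecore_status_t example_load_table(struct ecore_hwfn *p_hwfn,
                                               struct ecore_ptt *p_ptt)
{
        u32 table[64] = { 0 };          /* host virtual buffer */
        u32 grc_addr = 0x60000;         /* hypothetical GRC byte address */

        return ecore_dmae_host2grc(p_hwfn, p_ptt,
                                   (u64)(osal_uintptr_t)table, grc_addr,
                                   sizeof(table) / sizeof(u32),
                                   OSAL_NULL /* default parameters */);
}
#endif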

enum _ecore_status_t ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         u32 grc_addr,
                                         dma_addr_t dest_addr,
                                         u32 size_in_dwords,
                                         struct ecore_dmae_params *p_params)
{
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        enum _ecore_status_t rc;

        OSAL_SPIN_LOCK(&(p_hwfn->dmae_info.lock));

        rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
                                        dest_addr, ECORE_DMAE_ADDRESS_GRC,
                                        ECORE_DMAE_ADDRESS_HOST_VIRT,
                                        size_in_dwords, p_params);

        OSAL_SPIN_UNLOCK(&(p_hwfn->dmae_info.lock));

        return rc;
}

enum _ecore_status_t ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          dma_addr_t source_addr,
                                          dma_addr_t dest_addr,
                                          u32 size_in_dwords,
                                          struct ecore_dmae_params *p_params)
{
        enum _ecore_status_t rc;

        OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);

        rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
                                        dest_addr,
                                        ECORE_DMAE_ADDRESS_HOST_PHYS,
                                        ECORE_DMAE_ADDRESS_HOST_PHYS,
                                        size_in_dwords,
                                        p_params);

        OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

        return rc;
}

void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
                         enum ecore_hw_err_type err_type)
{
        /* Fan failure cannot be masked by handling of another HW error */
        if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
                           "Recovery is in progress. Avoid notifying about HW error %d.\n",
                           err_type);
                return;
        }

        OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
}

enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       const char *phase)
{
        u32 size = OSAL_PAGE_SIZE / 2, val;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        dma_addr_t p_phys;
        void *p_virt;
        u32 *p_tmp;

        p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size);
        if (!p_virt) {
                DP_NOTICE(p_hwfn, false,
                          "DMAE sanity [%s]: failed to allocate memory\n",
                          phase);
                return ECORE_NOMEM;
        }

        /* Fill the bottom half of the allocated memory with a known pattern */
        for (p_tmp = (u32 *)p_virt;
             p_tmp < (u32 *)((u8 *)p_virt + size);
             p_tmp++) {
                /* Save the address itself as the value */
                val = (u32)(osal_uintptr_t)p_tmp;
                *p_tmp = val;
        }

        /* Zero the top half of the allocated memory */
        OSAL_MEM_ZERO((u8 *)p_virt + size, size);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
                   phase, (unsigned long long)p_phys, p_virt,
                   (unsigned long long)(p_phys + size), (u8 *)p_virt + size,
                   size);

        rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
                                  size / 4 /* size_in_dwords */,
                                  OSAL_NULL /* default parameters */);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false,
                          "DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
                          phase, rc);
                goto out;
        }

        /* Verify that the top half of the allocated memory has the pattern */
        for (p_tmp = (u32 *)((u8 *)p_virt + size);
             p_tmp < (u32 *)((u8 *)p_virt + (2 * size));
             p_tmp++) {
                /* The corresponding address in the bottom half */
                val = (u32)(osal_uintptr_t)p_tmp - size;

                if (*p_tmp != val) {
                        DP_NOTICE(p_hwfn, false,
                                  "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
                                  phase,
                                  (unsigned long long)(p_phys + (u32)((u8 *)p_tmp - (u8 *)p_virt)),
                                  p_tmp, *p_tmp, val);
                        rc = ECORE_UNKNOWN_ERROR;
                        goto out;
                }
        }

out:
        OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);
        return rc;
}

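/*
 * The two helpers below access a register on behalf of an absolute PPFID
 * by temporarily pretending to be the corresponding PF, then restoring
 * the hwfn's own PFID, so callers must not rely on any pretend state
 * persisting across these calls.
 */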
void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                    u8 abs_ppfid, u32 hw_addr, u32 val)
{
        u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);

        ecore_fid_pretend(p_hwfn, p_ptt,
                          pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
        ecore_wr(p_hwfn, p_ptt, hw_addr, val);
        ecore_fid_pretend(p_hwfn, p_ptt,
                          p_hwfn->rel_pf_id <<
                          PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
}

u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                   u8 abs_ppfid, u32 hw_addr)
{
        u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
        u32 val;

        ecore_fid_pretend(p_hwfn, p_ptt,
                          pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
        val = ecore_rd(p_hwfn, p_ptt, hw_addr);
        ecore_fid_pretend(p_hwfn, p_ptt,
                          p_hwfn->rel_pf_id <<
                          PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

        return val;
}

#ifdef _NTDDK_
#pragma warning(pop)
#endif