/*-
 * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bxe.h"
#include "ecore_init.h"

/**** Exe Queue interfaces ****/

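/* An execution queue accumulates classification commands on 'exe_queue',
 * moves one bounded chunk of them to 'pending_comp' when a ramrod is
 * issued, and frees them once the completion arrives. The owner object
 * supplies the validate/remove/optimize/execute/get callbacks used below.
 */
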
/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @sc:         driver handle
 * @o:          pointer to the object
 * @exe_len:    maximum length of a single execution chunk
 * @owner:      pointer to the owner
 * @validate:   validate function pointer
 * @remove:     remove function pointer
 * @optimize:   optimize function pointer
 * @exec:       execute function pointer
 * @get:        get function pointer
 */
static inline void ecore_exe_queue_init(struct bxe_softc *sc,
                                        struct ecore_exe_queue_obj *o,
                                        int exe_len,
                                        union ecore_qable_obj *owner,
                                        exe_q_validate validate,
                                        exe_q_remove remove,
                                        exe_q_optimize optimize,
                                        exe_q_execute exec,
                                        exe_q_get get)
{
        ECORE_MEMSET(o, 0, sizeof(*o));

        ECORE_LIST_INIT(&o->exe_queue);
        ECORE_LIST_INIT(&o->pending_comp);

        ECORE_SPIN_LOCK_INIT(&o->lock, sc);

        o->exe_chunk_len = exe_len;
        o->owner         = owner;

        /* Owner specific callbacks */
        o->validate      = validate;
        o->remove        = remove;
        o->optimize      = optimize;
        o->execute       = exec;
        o->get           = get;

        ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d\n",
                  exe_len);
}

static inline void ecore_exe_queue_free_elem(struct bxe_softc *sc,
                                             struct ecore_exeq_elem *elem)
{
        ECORE_MSG(sc, "Deleting an exe_queue element\n");
        ECORE_FREE(sc, elem, sizeof(*elem));
}

static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
{
        struct ecore_exeq_elem *elem;
        int cnt = 0;

        ECORE_SPIN_LOCK_BH(&o->lock);

        ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
                                  struct ecore_exeq_elem)
                cnt++;

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return cnt;
}

/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @sc:         driver handle
 * @o:          queue
 * @elem:       new element to add
 * @restore:    true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int ecore_exe_queue_add(struct bxe_softc *sc,
                                      struct ecore_exe_queue_obj *o,
                                      struct ecore_exeq_elem *elem,
                                      bool restore)
{
        int rc;

        ECORE_SPIN_LOCK_BH(&o->lock);

        if (!restore) {
                /* Try to cancel this element against already queued commands */
                rc = o->optimize(sc, o->owner, elem);
                if (rc)
                        goto free_and_exit;

                /* Check if this request is ok */
                rc = o->validate(sc, o->owner, elem);
                if (rc) {
                        ECORE_MSG(sc, "Preamble failed: %d\n", rc);
                        goto free_and_exit;
                }
        }

        /* If so, add it to the execution queue */
        ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return ECORE_SUCCESS;

free_and_exit:
        ecore_exe_queue_free_elem(sc, elem);

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return rc;
}

static inline void __ecore_exe_queue_reset_pending(
        struct bxe_softc *sc,
        struct ecore_exe_queue_obj *o)
{
        struct ecore_exeq_elem *elem;

        while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
                elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
                                              struct ecore_exeq_elem,
                                              link);

                ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
                ecore_exe_queue_free_elem(sc, elem);
        }
}

/**
 * ecore_exe_queue_step - execute one execution chunk atomically
 *
 * @sc:                 driver handle
 * @o:                  queue
 * @ramrod_flags:       flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static inline int ecore_exe_queue_step(struct bxe_softc *sc,
                                       struct ecore_exe_queue_obj *o,
                                       unsigned long *ramrod_flags)
{
        struct ecore_exeq_elem *elem, spacer;
        int cur_len = 0, rc;

        ECORE_MEMSET(&spacer, 0, sizeof(spacer));

        /* Next step should not be performed until the current is finished,
         * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
         * properly clear object internals without sending any command to the FW
         * which also implies there won't be any completion to clear the
         * 'pending' list.
         */
        if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
                if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
                        ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
                        __ecore_exe_queue_reset_pending(sc, o);
                } else {
                        return ECORE_PENDING;
                }
        }

        /* Run through the pending commands list and create the next
         * execution chunk.
         */
        while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
                elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
                                              struct ecore_exeq_elem,
                                              link);
                ECORE_DBG_BREAK_IF(!elem->cmd_len);

                if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
                        cur_len += elem->cmd_len;
                        /* Prevent from both lists being empty when moving an
                         * element. This will allow the call of
                         * ecore_exe_queue_empty() without locking.
                         */
                        ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
                        mb();
                        ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
                        ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
                        ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
                } else
                        break;
        }

        /* Sanity check */
        if (!cur_len)
                return ECORE_SUCCESS;

        rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
        if (rc < 0)
                /* In case of an error return the commands back to the queue
                 * and reset the pending_comp.
                 */
                ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
        else if (!rc)
                /* If zero is returned, it means there are no outstanding
                 * pending completions and we may dismiss the pending list.
                 */
                __ecore_exe_queue_reset_pending(sc, o);

        return rc;
}
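
/* Illustrative usage sketch (not part of the original source): callers
 * drain the queue while holding the execution queue lock, e.g.
 *
 *     ECORE_SPIN_LOCK_BH(&o->lock);
 *     rc = ecore_exe_queue_step(sc, o, &ramrod_flags);
 *     ECORE_SPIN_UNLOCK_BH(&o->lock);
 *
 * ECORE_PENDING means a previously sent chunk is still awaiting its
 * completion; ECORE_SUCCESS with an empty queue means nothing was sent.
 */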

static inline bool ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
{
        bool empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);

        /* Don't reorder!!! */
        mb();

        return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
}

static inline struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(
        struct bxe_softc *sc)
{
        ECORE_MSG(sc, "Allocating a new exe_queue element\n");
        return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC,
                            sc);
}

/************************ raw_obj functions ***********************************/
static bool ecore_raw_check_pending(struct ecore_raw_obj *o)
{
        /* !! converts the value returned by ECORE_TEST_BIT such that it
         * is guaranteed not to be truncated regardless of bool definition.
         *
         * Note we cannot simply define the function's return value type
         * to match the type returned by ECORE_TEST_BIT, as it varies by
         * platform/implementation.
         */

        return !!ECORE_TEST_BIT(o->state, o->pstate);
}

static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
        ECORE_SMP_MB_BEFORE_CLEAR_BIT();
        ECORE_CLEAR_BIT(o->state, o->pstate);
        ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
        ECORE_SMP_MB_BEFORE_CLEAR_BIT();
        ECORE_SET_BIT(o->state, o->pstate);
        ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

/**
 * ecore_state_wait - wait until the given bit(state) is cleared
 *
 * @sc:         device handle
 * @state:      state which is to be cleared
 * @pstate:     state buffer
 *
 */
static inline int ecore_state_wait(struct bxe_softc *sc, int state,
                                   unsigned long *pstate)
{
        /* can take a while if any port is running */
        int cnt = 5000;

        if (CHIP_REV_IS_EMUL(sc))
                cnt *= 20;

        ECORE_MSG(sc, "waiting for state to become %d\n", state);

        ECORE_MIGHT_SLEEP();
        while (cnt--) {
                if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
                        ECORE_MSG(sc, "exit  (cnt %d)\n", 5000 - cnt);
#endif
                        return ECORE_SUCCESS;
                }

                ECORE_WAIT(sc, delay_us);

                if (sc->panic)
                        return ECORE_IO;
        }

        /* timeout! */
        ECORE_ERR("timeout waiting for state %d\n", state);
#ifdef ECORE_STOP_ON_ERROR
        ecore_panic();
#endif

        return ECORE_TIMEOUT;
}

static int ecore_raw_wait(struct bxe_softc *sc, struct ecore_raw_obj *raw)
{
        return ecore_state_wait(sc, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
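/* The credit pool contract, as used below: get()/put() consume and return
 * CAM credits and return FALSE when the pool is exhausted (or the return
 * would overflow); get_entry()/put_entry() reserve and release a specific
 * CAM offset. The combined VLAN-MAC helpers take one credit from each
 * pool and roll back on partial failure.
 */
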
static bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        ECORE_DBG_BREAK_IF(!mp);

        return mp->get_entry(mp, offset);
}

static bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        ECORE_DBG_BREAK_IF(!mp);

        return mp->get(mp, 1);
}

static bool ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset)
{
        struct ecore_credit_pool_obj *vp = o->vlans_pool;

        ECORE_DBG_BREAK_IF(!vp);

        return vp->get_entry(vp, offset);
}

static bool ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *vp = o->vlans_pool;

        ECORE_DBG_BREAK_IF(!vp);

        return vp->get(vp, 1);
}

static bool ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;
        struct ecore_credit_pool_obj *vp = o->vlans_pool;

        if (!mp->get(mp, 1))
                return FALSE;

        if (!vp->get(vp, 1)) {
                mp->put(mp, 1);
                return FALSE;
        }

        return TRUE;
}

static bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        return mp->put_entry(mp, offset);
}

static bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        return mp->put(mp, 1);
}

static bool ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset)
{
        struct ecore_credit_pool_obj *vp = o->vlans_pool;

        return vp->put_entry(vp, offset);
}

static bool ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *vp = o->vlans_pool;

        return vp->put(vp, 1);
}

static bool ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;
        struct ecore_credit_pool_obj *vp = o->vlans_pool;

        if (!mp->put(mp, 1))
                return FALSE;

        if (!vp->put(vp, 1)) {
                mp->get(mp, 1);
                return FALSE;
        }

        return TRUE;
}

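/* Locking scheme for the vlan_mac registry head list, as implemented by
 * the helpers below: readers are counted in o->head_reader and may only
 * enter while no writer holds the list; a writer may only enter when
 * there are no readers. A step that could not run because the list was
 * busy is parked via head_exe_request/saved_ramrod_flags and replayed
 * when the last holder releases the lock.
 */
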
/**
 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
 * head list.
 *
 * @sc:         device handle
 * @o:          vlan_mac object
 *
 * @details Non-blocking implementation; should be called under execution
 *          queue lock.
 */
static int __ecore_vlan_mac_h_write_trylock(struct bxe_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
{
        if (o->head_reader) {
                ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy\n");
                return ECORE_BUSY;
        }

        ECORE_MSG(sc, "vlan_mac_lock writer - Taken\n");
        return ECORE_SUCCESS;
}

/**
 * __ecore_vlan_mac_h_exec_pending - execute a step that previously could
 * not run because the lock on the vlan mac head list was taken.
 *
 * @sc:         device handle
 * @o:          vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __ecore_vlan_mac_h_exec_pending(struct bxe_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
{
        int rc;
        unsigned long ramrod_flags = o->saved_ramrod_flags;

        ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
                  ramrod_flags);
        o->head_exe_request = FALSE;
        o->saved_ramrod_flags = 0;
        rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
        if (rc != ECORE_SUCCESS) {
                ECORE_ERR("execution of pending commands failed with rc %d\n",
                          rc);
#ifdef ECORE_STOP_ON_ERROR
                ecore_panic();
#endif
        }
}

/**
 * __ecore_vlan_mac_h_pend - pend an execution step which could not run
 * because the vlan mac head list lock was taken.
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 * @ramrod_flags:       ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __ecore_vlan_mac_h_pend(struct bxe_softc *sc,
                                    struct ecore_vlan_mac_obj *o,
                                    unsigned long ramrod_flags)
{
        o->head_exe_request = TRUE;
        o->saved_ramrod_flags = ramrod_flags;
        ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu\n",
                  ramrod_flags);
}

/**
 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
{
        /* It's possible a new pending execution was added since this writer
         * executed. If so, execute again. [Ad infinitum]
         */
        while (o->head_exe_request) {
                ECORE_MSG(sc, "vlan_mac_lock - writer release encountered a pending request\n");
                __ecore_vlan_mac_h_exec_pending(sc, o);
        }
}

/**
 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
                                   struct ecore_vlan_mac_obj *o)
{
        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        __ecore_vlan_mac_h_write_unlock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
                                        struct ecore_vlan_mac_obj *o)
{
        /* If we got here, we're holding lock --> no WRITER exists */
        o->head_reader++;
        ECORE_MSG(sc, "vlan_mac_lock - locked reader - number %d\n",
                  o->head_reader);

        return ECORE_SUCCESS;
}

/**
 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
                               struct ecore_vlan_mac_obj *o)
{
        int rc;

        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        rc = __ecore_vlan_mac_h_read_lock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

        return rc;
}

/**
 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader, possibly releasing and reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
                                           struct ecore_vlan_mac_obj *o)
{
        if (!o->head_reader) {
                ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef ECORE_STOP_ON_ERROR
                ecore_panic();
#endif
        } else {
                o->head_reader--;
                ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d\n",
                          o->head_reader);
        }

        /* It's possible a new pending execution was added, and that this reader
         * was last - if so we need to execute the command.
         */
        if (!o->head_reader && o->head_exe_request) {
                ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request\n");

                /* Writer release will do the trick */
                __ecore_vlan_mac_h_write_unlock(sc, o);
        }
}

/**
 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
                                  struct ecore_vlan_mac_obj *o)
{
        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        __ecore_vlan_mac_h_read_unlock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}
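
/* Illustrative usage sketch (not part of the original source) for the
 * reader lock around a registry walk:
 *
 *     if (ecore_vlan_mac_h_read_lock(sc, o) == ECORE_SUCCESS) {
 *             ... iterate over o->head ...
 *             ecore_vlan_mac_h_read_unlock(sc, o);
 *     }
 */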

/**
 * ecore_get_n_elements - copy up to n registry elements to a user buffer
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 * @n:                  number of elements to get
 * @base:               base address for element placement
 * @stride:             stride between elements (in bytes)
 * @size:               size of each element (in bytes)
 */
static int ecore_get_n_elements(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
                                int n, uint8_t *base, uint8_t stride, uint8_t size)
{
        struct ecore_vlan_mac_registry_elem *pos;
        uint8_t *next = base;
        int counter = 0;
        int read_lock;

        ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)\n");
        read_lock = ecore_vlan_mac_h_read_lock(sc, o);
        if (read_lock != ECORE_SUCCESS)
                ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

        /* traverse list */
        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem) {
                if (counter < n) {
                        ECORE_MEMCPY(next, &pos->u, size);
                        counter++;
                        ECORE_MSG(sc, "copied element number %d to address %p element was:\n",
                                  counter, next);
                        next += stride + size;
                }
        }

        if (read_lock == ECORE_SUCCESS) {
                ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)\n");
                ecore_vlan_mac_h_read_unlock(sc, o);
        }

        return counter * ETH_ALEN;
}

/* check_add() callbacks */
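/* Contract for the callbacks below, as used by the validate step:
 * check_add() returns ECORE_SUCCESS when the element may be added,
 * ECORE_EXISTS when an equal element is already registered and
 * ECORE_INVAL when the data is malformed; check_del() returns the
 * matching registry element, or NULL when there is nothing to delete.
 */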
static int ecore_check_mac_add(struct bxe_softc *sc,
                               struct ecore_vlan_mac_obj *o,
                               union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

        if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
                return ECORE_INVAL;

        /* Check if a requested MAC already exists */
        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return ECORE_EXISTS;

        return ECORE_SUCCESS;
}

static int ecore_check_vlan_add(struct bxe_softc *sc,
                                struct ecore_vlan_mac_obj *o,
                                union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG(sc, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if (data->vlan.vlan == pos->u.vlan.vlan)
                        return ECORE_EXISTS;

        return ECORE_SUCCESS;
}

static int ecore_check_vlan_mac_add(struct bxe_softc *sc,
                                    struct ecore_vlan_mac_obj *o,
                                    union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
                  data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);

        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
                    (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
                                   ETH_ALEN)) &&
                    (data->vlan_mac.is_inner_mac ==
                     pos->u.vlan_mac.is_inner_mac))
                        return ECORE_EXISTS;

        return ECORE_SUCCESS;
}

/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *
        ecore_check_mac_del(struct bxe_softc *sc,
                            struct ecore_vlan_mac_obj *o,
                            union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return pos;

        return NULL;
}

static struct ecore_vlan_mac_registry_elem *
        ecore_check_vlan_del(struct bxe_softc *sc,
                             struct ecore_vlan_mac_obj *o,
                             union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG(sc, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if (data->vlan.vlan == pos->u.vlan.vlan)
                        return pos;

        return NULL;
}

static struct ecore_vlan_mac_registry_elem *
        ecore_check_vlan_mac_del(struct bxe_softc *sc,
                                 struct ecore_vlan_mac_obj *o,
                                 union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
                  data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);

        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
                    (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
                                   ETH_ALEN)) &&
                    (data->vlan_mac.is_inner_mac ==
                     pos->u.vlan_mac.is_inner_mac))
                        return pos;

        return NULL;
}

/* check_move() callback */
static bool ecore_check_move(struct bxe_softc *sc,
                             struct ecore_vlan_mac_obj *src_o,
                             struct ecore_vlan_mac_obj *dst_o,
                             union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;
        int rc;

        /* Check if we can delete the requested configuration from the first
         * object.
         */
        pos = src_o->check_del(sc, src_o, data);

        /* Check if the configuration can be added */
        rc = dst_o->check_add(sc, dst_o, data);

        /* If this classification cannot be added (it already exists)
         * or cannot be deleted, return an error.
         */
        if (rc || !pos)
                return FALSE;

        return TRUE;
}

static bool ecore_check_move_always_err(
        struct bxe_softc *sc,
        struct ecore_vlan_mac_obj *src_o,
        struct ecore_vlan_mac_obj *dst_o,
        union ecore_classification_ramrod_data *data)
{
        return FALSE;
}

static inline uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
{
        struct ecore_raw_obj *raw = &o->raw;
        uint8_t rx_tx_flag = 0;

        if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
            (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

        if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
            (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

        return rx_tx_flag;
}

void ecore_set_mac_in_nig(struct bxe_softc *sc,
                          bool add, unsigned char *dev_addr, int index)
{
        uint32_t wb_data[2];
        uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
                         NIG_REG_LLH0_FUNC_MEM;

        if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
                return;

        if (index > ECORE_LLH_CAM_MAX_PF_LINE)
                return;

        ECORE_MSG(sc, "Going to %s LLH configuration at entry %d\n",
                  (add ? "ADD" : "DELETE"), index);

        if (add) {
                /* LLH_FUNC_MEM is a uint64_t WB register */
                reg_offset += 8*index;

                wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
                              (dev_addr[4] <<  8) |  dev_addr[5]);
                wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
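                /* e.g. for dev_addr aa:bb:cc:dd:ee:ff this packs
                 * wb_data[0] = 0xccddeeff and wb_data[1] = 0x0000aabb
                 */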

                ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
        }

        REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
                                  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}

/**
 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @sc:         device handle
 * @o:          queue for which we want to configure this rule
 * @add:        if TRUE the command is an ADD command, DEL otherwise
 * @opcode:     CLASSIFY_RULE_OPCODE_XXX
 * @hdr:        pointer to a header to setup
 *
 */
static inline void ecore_vlan_mac_set_cmd_hdr_e2(struct bxe_softc *sc,
        struct ecore_vlan_mac_obj *o, bool add, int opcode,
        struct eth_classify_cmd_header *hdr)
{
        struct ecore_raw_obj *raw = &o->raw;

        hdr->client_id = raw->cl_id;
        hdr->func_id = raw->func_id;

        /* Rx and/or Tx (internal switching) configuration? */
        hdr->cmd_general_data |=
                ecore_vlan_mac_get_rx_tx_flag(o);

        if (add)
                hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

        hdr->cmd_general_data |=
                (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:        connection id
 * @type:       ECORE_FILTER_XXX_PENDING
 * @hdr:        pointer to header to setup
 * @rule_cnt:   number of rules in this ramrod
 *
 * Currently we always configure one rule; the echo field is set to contain
 * the CID and an opcode type.
 */
static inline void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
                                struct eth_classify_header *hdr, int rule_cnt)
{
        hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
                                (type << ECORE_SWCID_SHIFT));
        hdr->rule_cnt = (uint8_t)rule_cnt;
}
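
/* The echo word thus carries both identifiers: the low bits hold the SW
 * connection id and the bits above ECORE_SWCID_SHIFT hold the pending
 * command type, so the completion handler can recover (cid, type) from
 * the single 32-bit field it gets back from the firmware.
 */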

/* hw_config() callbacks */
static void ecore_set_one_mac_e2(struct bxe_softc *sc,
                                 struct ecore_vlan_mac_obj *o,
                                 struct ecore_exeq_elem *elem, int rule_idx,
                                 int cam_offset)
{
        struct ecore_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
        unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
        uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;

        /* Set LLH CAM entry: currently only iSCSI and ETH macs are
         * relevant. In addition, current implementation is tuned for a
         * single ETH MAC.
         *
         * When PF configuration of multiple unicast ETH MACs in switch
         * independent mode is required (NetQ, multiple netdev MACs,
         * etc.), consider better utilisation of the 8 per-function MAC
         * entries in the LLH register. There are also
         * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the
         * total number of CAM entries to 16.
         *
         * Currently we won't configure NIG for MACs other than a primary ETH
         * MAC and iSCSI L2 MAC.
         *
         * If this MAC is moving from one Queue to another, no need to change
         * NIG configuration.
         */
        if (cmd != ECORE_VLAN_MAC_MOVE) {
                if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
                        ecore_set_mac_in_nig(sc, add, mac,
                                             ECORE_LLH_CAM_ISCSI_ETH_LINE);
                else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
                        ecore_set_mac_in_nig(sc, add, mac,
                                             ECORE_LLH_CAM_ETH_LINE);
        }

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                ECORE_MEMSET(data, 0, sizeof(*data));

        /* Setup a command header */
        ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_MAC,
                                      &rule_entry->mac.header);

        ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n",
                  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id);

        /* Set a MAC itself */
        ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                              &rule_entry->mac.mac_mid,
                              &rule_entry->mac.mac_lsb, mac);
        rule_entry->mac.inner_mac =
                elem->cmd_data.vlan_mac.u.mac.is_inner_mac;

        /* MOVE: Add a rule that will add this MAC to the target Queue */
        if (cmd == ECORE_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                ecore_vlan_mac_set_cmd_hdr_e2(sc,
                                        elem->cmd_data.vlan_mac.target_obj,
                                              TRUE, CLASSIFY_RULE_OPCODE_MAC,
                                              &rule_entry->mac.header);

                /* Set a MAC itself */
                ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                                      &rule_entry->mac.mac_mid,
                                      &rule_entry->mac.mac_lsb, mac);
                rule_entry->mac.inner_mac =
                        elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent it from
         * being written multiple times
         */
        ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @sc:         device handle
 * @o:          queue
 * @type:       ECORE_FILTER_XXX_PENDING
 * @cam_offset: offset in cam memory
 * @hdr:        pointer to a header to setup
 *
 * E1/E1H
 */
static inline void ecore_vlan_mac_set_rdata_hdr_e1x(struct bxe_softc *sc,
        struct ecore_vlan_mac_obj *o, int type, int cam_offset,
        struct mac_configuration_hdr *hdr)
{
        struct ecore_raw_obj *r = &o->raw;

        hdr->length = 1;
        hdr->offset = (uint8_t)cam_offset;
        hdr->client_id = ECORE_CPU_TO_LE16(0xff);
        hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
                                (type << ECORE_SWCID_SHIFT));
}

static inline void ecore_vlan_mac_set_cfg_entry_e1x(struct bxe_softc *sc,
        struct ecore_vlan_mac_obj *o, bool add, int opcode, uint8_t *mac,
        uint16_t vlan_id, struct mac_configuration_entry *cfg_entry)
{
        struct ecore_raw_obj *r = &o->raw;
        uint32_t cl_bit_vec = (1 << r->cl_id);

        cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
        cfg_entry->pf_id = r->func_id;
        cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);

        if (add) {
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                               T_ETH_MAC_COMMAND_SET);
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
                               opcode);

                /* Set a MAC in a ramrod data */
                ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
                                      &cfg_entry->middle_mac_addr,
                                      &cfg_entry->lsb_mac_addr, mac);
        } else
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                               T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void ecore_vlan_mac_set_rdata_e1x(struct bxe_softc *sc,
        struct ecore_vlan_mac_obj *o, int type, int cam_offset, bool add,
        uint8_t *mac, uint16_t vlan_id, int opcode, struct mac_configuration_cmd *config)
{
        struct mac_configuration_entry *cfg_entry = &config->config_table[0];
        struct ecore_raw_obj *raw = &o->raw;

        ecore_vlan_mac_set_rdata_hdr_e1x(sc, o, type, cam_offset,
                                         &config->hdr);
        ecore_vlan_mac_set_cfg_entry_e1x(sc, o, add, opcode, mac, vlan_id,
                                         cfg_entry);

        ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n",
                  (add ? "setting" : "clearing"),
                  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id, cam_offset);
}

/**
 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @sc:         device handle
 * @o:          ecore_vlan_mac_obj
 * @elem:       ecore_exeq_elem
 * @rule_idx:   rule_idx
 * @cam_offset: cam_offset
 */
static void ecore_set_one_mac_e1x(struct bxe_softc *sc,
                                  struct ecore_vlan_mac_obj *o,
                                  struct ecore_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct ecore_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
        /* 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
                TRUE : FALSE;

        /* Reset the ramrod data buffer */
        ECORE_MEMSET(config, 0, sizeof(*config));

        ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.mac.mac, 0,
                                     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void ecore_set_one_vlan_e2(struct bxe_softc *sc,
                                  struct ecore_vlan_mac_obj *o,
                                  struct ecore_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct ecore_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
        uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                ECORE_MEMSET(data, 0, sizeof(*data));

        /* Set a rule header */
        ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_VLAN,
                                      &rule_entry->vlan.header);

        ECORE_MSG(sc, "About to %s VLAN %d\n", (add ? "add" : "delete"),
                  vlan);

        /* Set a VLAN itself */
        rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);

        /* MOVE: Add a rule that will add this VLAN to the target Queue */
        if (cmd == ECORE_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                ecore_vlan_mac_set_cmd_hdr_e2(sc,
                                        elem->cmd_data.vlan_mac.target_obj,
                                              TRUE, CLASSIFY_RULE_OPCODE_VLAN,
                                              &rule_entry->vlan.header);

                /* Set a VLAN itself */
                rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent it from
         * being written multiple times
         */
        ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

static void ecore_set_one_vlan_mac_e2(struct bxe_softc *sc,
                                      struct ecore_vlan_mac_obj *o,
                                      struct ecore_exeq_elem *elem,
                                      int rule_idx, int cam_offset)
{
        struct ecore_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
        uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
        uint8_t *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                ECORE_MEMSET(data, 0, sizeof(*data));

        /* Set a rule header */
        ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_PAIR,
                                      &rule_entry->pair.header);

        /* Set VLAN and MAC themselves */
        rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
        ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
                              &rule_entry->pair.mac_mid,
                              &rule_entry->pair.mac_lsb, mac);
        rule_entry->pair.inner_mac =
                        elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;

        /* MOVE: Add a rule that will add this VLAN-MAC pair to the target
         * Queue
         */
        if (cmd == ECORE_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                ecore_vlan_mac_set_cmd_hdr_e2(sc,
                                        elem->cmd_data.vlan_mac.target_obj,
                                              TRUE, CLASSIFY_RULE_OPCODE_PAIR,
                                              &rule_entry->pair.header);

                /* Set VLAN and MAC themselves */
                rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
                ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
                                      &rule_entry->pair.mac_mid,
                                      &rule_entry->pair.mac_lsb, mac);
                rule_entry->pair.inner_mac =
                        elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent it from
         * being written multiple times
         */
        ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

/**
 * ecore_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @sc:         device handle
 * @o:          ecore_vlan_mac_obj
 * @elem:       ecore_exeq_elem
 * @rule_idx:   rule_idx
 * @cam_offset: cam_offset
 */
static void ecore_set_one_vlan_mac_e1h(struct bxe_softc *sc,
                                       struct ecore_vlan_mac_obj *o,
                                       struct ecore_exeq_elem *elem,
                                       int rule_idx, int cam_offset)
{
        struct ecore_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
        /* 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
                TRUE : FALSE;

        /* Reset the ramrod data buffer */
        ECORE_MEMSET(config, 0, sizeof(*config));

        ecore_vlan_mac_set_rdata_e1x(sc, o, ECORE_FILTER_VLAN_MAC_PENDING,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
                                     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
                                     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
        list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * ecore_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @sc:         device handle
 * @p:          command parameters
 * @ppos:       pointer to the cookie
 *
 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * The cookie should be given back in the next call to make the function
 * handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has
 * been handled.
 */
static int ecore_vlan_mac_restore(struct bxe_softc *sc,
                           struct ecore_vlan_mac_ramrod_params *p,
                           struct ecore_vlan_mac_registry_elem **ppos)
{
        struct ecore_vlan_mac_registry_elem *pos;
        struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;

        /* If list is empty - there is nothing to do here */
        if (ECORE_LIST_IS_EMPTY(&o->head)) {
                *ppos = NULL;
                return 0;
        }

        /* make a step... */
        if (*ppos == NULL)
                *ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
                                            struct ecore_vlan_mac_registry_elem,
                                               link);
        else
                *ppos = ECORE_LIST_NEXT(*ppos, link,
                                        struct ecore_vlan_mac_registry_elem);

        pos = *ppos;

        /* If it's the last step - return NULL */
        if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
                *ppos = NULL;

        /* Prepare a 'user_req' */
        ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));

        /* Set the command */
        p->user_req.cmd = ECORE_VLAN_MAC_ADD;

        /* Set vlan_mac_flags */
        p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

        /* Set a restore bit */
        ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);

        return ecore_config_vlan_mac(sc, p);
}
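
/* Illustrative usage sketch (not part of the original source) of the
 * restore iterator - walk the whole registry, one element per call:
 *
 *     struct ecore_vlan_mac_registry_elem *pos = NULL;
 *     int rc;
 *
 *     do {
 *             rc = ecore_vlan_mac_restore(sc, p, &pos);
 *     } while ((rc == ECORE_SUCCESS) && pos);
 */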
1326
1327 /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1328  * pointer to an element with a specific criteria and NULL if such an element
1329  * hasn't been found.
1330  */
1331 static struct ecore_exeq_elem *ecore_exeq_get_mac(
1332         struct ecore_exe_queue_obj *o,
1333         struct ecore_exeq_elem *elem)
1334 {
1335         struct ecore_exeq_elem *pos;
1336         struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1337
1338         /* Check pending for execution commands */
1339         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1340                                   struct ecore_exeq_elem)
1341                 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
1342                               sizeof(*data)) &&
1343                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1344                         return pos;
1345
1346         return NULL;
1347 }
1348
1349 static struct ecore_exeq_elem *ecore_exeq_get_vlan(
1350         struct ecore_exe_queue_obj *o,
1351         struct ecore_exeq_elem *elem)
1352 {
1353         struct ecore_exeq_elem *pos;
1354         struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1355
1356         /* Check pending for execution commands */
1357         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1358                                   struct ecore_exeq_elem)
1359                 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan, data,
1360                               sizeof(*data)) &&
1361                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1362                         return pos;
1363
1364         return NULL;
1365 }
1366
1367 static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
1368         struct ecore_exe_queue_obj *o,
1369         struct ecore_exeq_elem *elem)
1370 {
1371         struct ecore_exeq_elem *pos;
1372         struct ecore_vlan_mac_ramrod_data *data =
1373                 &elem->cmd_data.vlan_mac.u.vlan_mac;
1374
1375         /* Check pending for execution commands */
1376         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1377                                   struct ecore_exeq_elem)
1378                 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1379                               sizeof(*data)) &&
1380                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1381                         return pos;
1382
1383         return NULL;
1384 }
1385
1386 /**
1387  * ecore_validate_vlan_mac_add - check if an ADD command can be executed
1388  *
1389  * @sc:         device handle
1390  * @qo:         ecore_qable_obj
1391  * @elem:       ecore_exeq_elem
1392  *
1393  * Checks that the requested configuration can be added. If yes and if
1394  * requested, consume CAM credit.
1395  *
1396  * The 'validate' is run after the 'optimize'.
1397  *
1398  */
1399 static inline int ecore_validate_vlan_mac_add(struct bxe_softc *sc,
1400                                               union ecore_qable_obj *qo,
1401                                               struct ecore_exeq_elem *elem)
1402 {
1403         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1404         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1405         int rc;
1406
1407         /* Check the registry */
1408         rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
1409         if (rc) {
1410                 ECORE_MSG(sc, "ADD command is not allowed considering current registry state.\n");
1411                 return rc;
1412         }
1413
1414         /* Check if there is a pending ADD command for this
1415          * MAC/VLAN/VLAN-MAC. Return an error if there is.
1416          */
1417         if (exeq->get(exeq, elem)) {
1418                 ECORE_MSG(sc, "There is a pending ADD command already\n");
1419                 return ECORE_EXISTS;
1420         }
1421
1422         /* TODO: Check the pending MOVE from other objects where this
1423          * object is a destination object.
1424          */
1425
1426         /* Consume the credit, unless explicitly asked not to */
1427         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1428                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1429             o->get_credit(o)))
1430                 return ECORE_INVAL;
1431
1432         return ECORE_SUCCESS;
1433 }
1434
1435 /**
1436  * ecore_validate_vlan_mac_del - check if the DEL command can be executed
1437  *
1438  * @sc:         device handle
1439  * @qo:         qable object to check
1440  * @elem:       element that needs to be deleted
1441  *
1442  * Checks that the requested configuration can be deleted. If yes and if
1443  * requested, returns a CAM credit.
1444  *
1445  * The 'validate' is run after the 'optimize'.
1446  */
1447 static inline int ecore_validate_vlan_mac_del(struct bxe_softc *sc,
1448                                               union ecore_qable_obj *qo,
1449                                               struct ecore_exeq_elem *elem)
1450 {
1451         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1452         struct ecore_vlan_mac_registry_elem *pos;
1453         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1454         struct ecore_exeq_elem query_elem;
1455
1456         /* If this classification cannot be deleted (i.e. it doesn't exist)
1457          * - return ECORE_EXISTS.
1458          */
1459         pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1460         if (!pos) {
1461                 ECORE_MSG(sc, "DEL command is not allowed considering current registry state\n");
1462                 return ECORE_EXISTS;
1463         }
1464
1465         /* Check if there are pending DEL or MOVE commands for this
1466          * MAC/VLAN/VLAN-MAC. Return an error if so.
1467          */
1468         ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1469
1470         /* Check for MOVE commands */
1471         query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
1472         if (exeq->get(exeq, &query_elem)) {
1473                 ECORE_ERR("There is a pending MOVE command already\n");
1474                 return ECORE_INVAL;
1475         }
1476
1477         /* Check for DEL commands */
1478         if (exeq->get(exeq, elem)) {
1479                 ECORE_MSG(sc, "There is a pending DEL command already\n");
1480                 return ECORE_EXISTS;
1481         }
1482
1483         /* Return the credit to the credit pool, unless explicitly asked not to */
1484         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1485                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1486             o->put_credit(o))) {
1487                 ECORE_ERR("Failed to return a credit\n");
1488                 return ECORE_INVAL;
1489         }
1490
1491         return ECORE_SUCCESS;
1492 }
1493
1494 /**
1495  * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
1496  *
1497  * @sc:         device handle
1498  * @qo:         qable object to check (source)
1499  * @elem:       element that needs to be moved
1500  *
1501  * Checks that the requested configuration can be moved. If yes and if
1502  * requested, returns a CAM credit.
1503  *
1504  * The 'validate' is run after the 'optimize'.
1505  */
1506 static inline int ecore_validate_vlan_mac_move(struct bxe_softc *sc,
1507                                                union ecore_qable_obj *qo,
1508                                                struct ecore_exeq_elem *elem)
1509 {
1510         struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
1511         struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1512         struct ecore_exeq_elem query_elem;
1513         struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
1514         struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1515
1516         /* Check if we can perform this operation based on the current registry
1517          * state.
1518          */
1519         if (!src_o->check_move(sc, src_o, dest_o,
1520                                &elem->cmd_data.vlan_mac.u)) {
1521                 ECORE_MSG(sc, "MOVE command is not allowed considering current registry state\n");
1522                 return ECORE_INVAL;
1523         }
1524
1525         /* Check if there is an already pending DEL or MOVE command for the
1526          * source object or ADD command for a destination object. Return an
1527          * error if so.
1528          */
1529         ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1530
1531         /* Check DEL on source */
1532         query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1533         if (src_exeq->get(src_exeq, &query_elem)) {
1534                 ECORE_ERR("There is a pending DEL command on the source queue already\n");
1535                 return ECORE_INVAL;
1536         }
1537
1538         /* Check MOVE on source */
1539         if (src_exeq->get(src_exeq, elem)) {
1540                 ECORE_MSG(sc, "There is a pending MOVE command already\n");
1541                 return ECORE_EXISTS;
1542         }
1543
1544         /* Check ADD on destination */
1545         query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1546         if (dest_exeq->get(dest_exeq, &query_elem)) {
1547                 ECORE_ERR("There is a pending ADD command on the destination queue already\n");
1548                 return ECORE_INVAL;
1549         }
1550
1551         /* Consume the credit, unless explicitly asked not to */
1552         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
1553                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1554             dest_o->get_credit(dest_o)))
1555                 return ECORE_INVAL;
1556
1557         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1558                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1559             src_o->put_credit(src_o))) {
1560                 /* return the credit taken from dest... */
1561                 dest_o->put_credit(dest_o);
1562                 return ECORE_INVAL;
1563         }
1564
1565         return ECORE_SUCCESS;
1566 }
1567
1568 static int ecore_validate_vlan_mac(struct bxe_softc *sc,
1569                                    union ecore_qable_obj *qo,
1570                                    struct ecore_exeq_elem *elem)
1571 {
1572         switch (elem->cmd_data.vlan_mac.cmd) {
1573         case ECORE_VLAN_MAC_ADD:
1574                 return ecore_validate_vlan_mac_add(sc, qo, elem);
1575         case ECORE_VLAN_MAC_DEL:
1576                 return ecore_validate_vlan_mac_del(sc, qo, elem);
1577         case ECORE_VLAN_MAC_MOVE:
1578                 return ecore_validate_vlan_mac_move(sc, qo, elem);
1579         default:
1580                 return ECORE_INVAL;
1581         }
1582 }
1583
1584 static int ecore_remove_vlan_mac(struct bxe_softc *sc,
1585                                   union ecore_qable_obj *qo,
1586                                   struct ecore_exeq_elem *elem)
1587 {
1588         int rc = 0;
1589
1590         /* If consumption wasn't required, nothing to do */
1591         if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1592                            &elem->cmd_data.vlan_mac.vlan_mac_flags))
1593                 return ECORE_SUCCESS;
1594
1595         switch (elem->cmd_data.vlan_mac.cmd) {
1596         case ECORE_VLAN_MAC_ADD:
1597         case ECORE_VLAN_MAC_MOVE:
1598                 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1599                 break;
1600         case ECORE_VLAN_MAC_DEL:
1601                 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1602                 break;
1603         default:
1604                 return ECORE_INVAL;
1605         }
1606
1607         if (rc != TRUE)
1608                 return ECORE_INVAL;
1609
1610         return ECORE_SUCCESS;
1611 }
1612
1613 /**
1614  * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1615  *
1616  * @sc:         device handle
1617  * @o:          ecore_vlan_mac_obj
1618  *
1619  */
1620 static int ecore_wait_vlan_mac(struct bxe_softc *sc,
1621                                struct ecore_vlan_mac_obj *o)
1622 {
1623         int cnt = 5000, rc;
1624         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1625         struct ecore_raw_obj *raw = &o->raw;
1626
1627         while (cnt--) {
1628                 /* Wait for the current command to complete */
1629                 rc = raw->wait_comp(sc, raw);
1630                 if (rc)
1631                         return rc;
1632
1633                 /* Wait until there are no pending commands */
1634                 if (!ecore_exe_queue_empty(exeq))
1635                         ECORE_WAIT(sc, 1000);
1636                 else
1637                         return ECORE_SUCCESS;
1638         }
1639
1640         return ECORE_TIMEOUT;
1641 }
1642
1643 static int __ecore_vlan_mac_execute_step(struct bxe_softc *sc,
1644                                          struct ecore_vlan_mac_obj *o,
1645                                          unsigned long *ramrod_flags)
1646 {
1647         int rc = ECORE_SUCCESS;
1648
1649         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1650
1651         ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock\n");
1652         rc = __ecore_vlan_mac_h_write_trylock(sc, o);
1653
1654         if (rc != ECORE_SUCCESS) {
1655                 __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
1656
1657                 /* The calling function should not differentiate between this
1658                  * case and the case in which there is already a pending ramrod.
1659                  */
1660                 rc = ECORE_PENDING;
1661         } else {
1662                 rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
1663         }
1664         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1665
1666         return rc;
1667 }
1668
1669 /**
1670  * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
1671  *
1672  * @sc:         device handle
1673  * @o:          ecore_vlan_mac_obj
1674  * @cqe:        completion element from the event ring
1675  * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1676  *
1677  */
1678 static int ecore_complete_vlan_mac(struct bxe_softc *sc,
1679                                    struct ecore_vlan_mac_obj *o,
1680                                    union event_ring_elem *cqe,
1681                                    unsigned long *ramrod_flags)
1682 {
1683         struct ecore_raw_obj *r = &o->raw;
1684         int rc;
1685
1686         /* Clearing the pending list & raw state should be made
1687          * atomically (as execution flow assumes they represent the same)
1688          */
1689         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1690
1691         /* Reset pending list */
1692         __ecore_exe_queue_reset_pending(sc, &o->exe_queue);
1693
1694         /* Clear pending */
1695         r->clear_pending(r);
1696
1697         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1698
1699         /* If ramrod failed this is most likely a SW bug */
1700         if (cqe->message.error)
1701                 return ECORE_INVAL;
1702
1703         /* Run the next bulk of pending commands if requested */
1704         if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
1705                 rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
1706                 if (rc < 0)
1707                         return rc;
1708         }
1709
1710         /* If there is more work to do return PENDING */
1711         if (!ecore_exe_queue_empty(&o->exe_queue))
1712                 return ECORE_PENDING;
1713
1714         return ECORE_SUCCESS;
1715 }
1716
1717 /**
1718  * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1719  *
1720  * @sc:         device handle
1721  * @o:          ecore_qable_obj
1722  * @elem:       ecore_exeq_elem
1723  */
1724 static int ecore_optimize_vlan_mac(struct bxe_softc *sc,
1725                                    union ecore_qable_obj *qo,
1726                                    struct ecore_exeq_elem *elem)
1727 {
1728         struct ecore_exeq_elem query, *pos;
1729         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1730         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1731
1732         ECORE_MEMCPY(&query, elem, sizeof(query));
1733
1734         switch (elem->cmd_data.vlan_mac.cmd) {
1735         case ECORE_VLAN_MAC_ADD:
1736                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1737                 break;
1738         case ECORE_VLAN_MAC_DEL:
1739                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1740                 break;
1741         default:
1742                 /* Don't handle anything other than ADD or DEL */
1743                 return 0;
1744         }
1745
1746         /* If we found the appropriate element - delete it */
1747         pos = exeq->get(exeq, &query);
1748         if (pos) {
1749
1750                 /* Return the credit of the optimized command */
1751                 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1752                                      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1753                         if ((query.cmd_data.vlan_mac.cmd ==
1754                              ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1755                                 ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
1756                                 return ECORE_INVAL;
1757                         } else if ((query.cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_DEL) && !o->get_credit(o)) {
1758                                 ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
1759                                 return ECORE_INVAL;
1760                         }
1761                 }
1762
1763                 ECORE_MSG(sc, "Optimizing %s command\n",
1764                           (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1765                           "ADD" : "DEL");
1766
1767                 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1768                 ecore_exe_queue_free_elem(sc, pos);
1769                 return 1;
1770         }
1771
1772         return 0;
1773 }
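
/* Worked example (editorial note, not in the upstream sources): suppose the
 * queue holds a pending ADD for MAC M and a DEL for the same M is then
 * submitted. 'query' becomes an ADD, exeq->get() finds the pending ADD, the
 * CAM credit that ADD consumed in its 'validate' stage is returned via
 * o->put_credit(), the pending element is unlinked and freed, and the return
 * value 1 tells the caller to drop the new DEL as well - so the pair cancels
 * out and nothing is ever sent to the FW.
 */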
1774
1775 /**
1776  * ecore_vlan_mac_get_registry_elem - prepare a registry element
1777  *
1778  * @sc:   device handle
1779  * @o:       vlan_mac object the registry element belongs to
1780  * @elem:    execution queue element describing the command
1781  * @restore: TRUE when running a RESTORE flow
1782  * @re:      output: the prepared registry element
1783  *
1784  * prepare a registry element according to the current command request.
1785  */
1786 static inline int ecore_vlan_mac_get_registry_elem(
1787         struct bxe_softc *sc,
1788         struct ecore_vlan_mac_obj *o,
1789         struct ecore_exeq_elem *elem,
1790         bool restore,
1791         struct ecore_vlan_mac_registry_elem **re)
1792 {
1793         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1794         struct ecore_vlan_mac_registry_elem *reg_elem;
1795
1796         /* Allocate a new registry element if needed. */
1797         if (!restore &&
1798             ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1799                 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1800                 if (!reg_elem)
1801                         return ECORE_NOMEM;
1802
1803                 /* Get a new CAM offset */
1804                 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1805                         /* This shall never happen, because we have checked the
1806                          * CAM availability in the 'validate'.
1807                          */
1808                         ECORE_DBG_BREAK_IF(1);
1809                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1810                         return ECORE_INVAL;
1811                 }
1812
1813                 ECORE_MSG(sc, "Got cam offset %d\n", reg_elem->cam_offset);
1814
1815                 /* Set a VLAN-MAC data */
1816                 ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1817                           sizeof(reg_elem->u));
1818
1819                 /* Copy the flags (needed for DEL and RESTORE flows) */
1820                 reg_elem->vlan_mac_flags =
1821                         elem->cmd_data.vlan_mac.vlan_mac_flags;
1822         } else /* DEL, RESTORE */
1823                 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1824
1825         *re = reg_elem;
1826         return ECORE_SUCCESS;
1827 }
1828
1829 /**
1830  * ecore_execute_vlan_mac - execute vlan mac command
1831  *
1832  * @sc:                 device handle
1833  * @qo:                 qable object (the vlan_mac member is used)
1834  * @exe_chunk:          list of execution queue elements to process
1835  * @ramrod_flags:       RAMROD_XX flags controlling the execution
1836  *
1837  * go and send a ramrod!
1838  */
1839 static int ecore_execute_vlan_mac(struct bxe_softc *sc,
1840                                   union ecore_qable_obj *qo,
1841                                   ecore_list_t *exe_chunk,
1842                                   unsigned long *ramrod_flags)
1843 {
1844         struct ecore_exeq_elem *elem;
1845         struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1846         struct ecore_raw_obj *r = &o->raw;
1847         int rc, idx = 0;
1848         bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1849         bool drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1850         struct ecore_vlan_mac_registry_elem *reg_elem;
1851         enum ecore_vlan_mac_cmd cmd;
1852
1853         /* If DRIVER_ONLY execution is requested, cleanup a registry
1854          * and exit. Otherwise send a ramrod to FW.
1855          */
1856         if (!drv_only) {
1857                 ECORE_DBG_BREAK_IF(r->check_pending(r));
1858
1859                 /* Set pending */
1860                 r->set_pending(r);
1861
1862                 /* Fill the ramrod data */
1863                 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1864                                           struct ecore_exeq_elem) {
1865                         cmd = elem->cmd_data.vlan_mac.cmd;
1866                         /* We will add to the target object in MOVE command, so
1867                          * change the object for a CAM search.
1868                          */
1869                         if (cmd == ECORE_VLAN_MAC_MOVE)
1870                                 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1871                         else
1872                                 cam_obj = o;
1873
1874                         rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1875                                                               elem, restore,
1876                                                               &reg_elem);
1877                         if (rc)
1878                                 goto error_exit;
1879
1880                         ECORE_DBG_BREAK_IF(!reg_elem);
1881
1882                         /* Push a new entry into the registry */
1883                         if (!restore &&
1884                             ((cmd == ECORE_VLAN_MAC_ADD) ||
1885                             (cmd == ECORE_VLAN_MAC_MOVE)))
1886                                 ECORE_LIST_PUSH_HEAD(&reg_elem->link,
1887                                                      &cam_obj->head);
1888
1889                         /* Configure a single command in a ramrod data buffer */
1890                         o->set_one_rule(sc, o, elem, idx,
1891                                         reg_elem->cam_offset);
1892
1893                         /* MOVE command consumes 2 entries in the ramrod data */
1894                         if (cmd == ECORE_VLAN_MAC_MOVE)
1895                                 idx += 2;
1896                         else
1897                                 idx++;
1898                 }
1899
1900                 /*
1901                  * No need for an explicit memory barrier here: we need to
1902                  * ensure the ordering of writing to the SPQ element and
1903                  * updating of the SPQ producer, which involves a memory
1904                  * read, and a full memory barrier is already placed there
1905                  * (inside ecore_sp_post()).
1906                  */
1907
1908                 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1909                                    r->rdata_mapping,
1910                                    ETH_CONNECTION_TYPE);
1911                 if (rc)
1912                         goto error_exit;
1913         }
1914
1915         /* Now, when we are done with the ramrod - clean up the registry */
1916         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1917                                   struct ecore_exeq_elem) {
1918                 cmd = elem->cmd_data.vlan_mac.cmd;
1919                 if ((cmd == ECORE_VLAN_MAC_DEL) ||
1920                     (cmd == ECORE_VLAN_MAC_MOVE)) {
1921                         reg_elem = o->check_del(sc, o,
1922                                                 &elem->cmd_data.vlan_mac.u);
1923
1924                         ECORE_DBG_BREAK_IF(!reg_elem);
1925
1926                         o->put_cam_offset(o, reg_elem->cam_offset);
1927                         ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
1928                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1929                 }
1930         }
1931
1932         if (!drv_only)
1933                 return ECORE_PENDING;
1934         else
1935                 return ECORE_SUCCESS;
1936
1937 error_exit:
1938         r->clear_pending(r);
1939
1940         /* Cleanup a registry in case of a failure */
1941         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1942                                   struct ecore_exeq_elem) {
1943                 cmd = elem->cmd_data.vlan_mac.cmd;
1944
1945                 if (cmd == ECORE_VLAN_MAC_MOVE)
1946                         cam_obj = elem->cmd_data.vlan_mac.target_obj;
1947                 else
1948                         cam_obj = o;
1949
1950                 /* Delete all newly added above entries */
1951                 if (!restore &&
1952                     ((cmd == ECORE_VLAN_MAC_ADD) ||
1953                     (cmd == ECORE_VLAN_MAC_MOVE))) {
1954                         reg_elem = o->check_del(sc, cam_obj,
1955                                                 &elem->cmd_data.vlan_mac.u);
1956                         if (reg_elem) {
1957                                 ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
1958                                                         &cam_obj->head);
1959                                 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1960                         }
1961                 }
1962         }
1963
1964         return rc;
1965 }
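
/* Editorial note (not in the upstream sources): a MOVE occupies two
 * consecutive rules in the ramrod data - conceptually a removal on the
 * source object plus an addition on the destination - which is why 'idx'
 * advances by 2 above and why ecore_vlan_mac_push_new_cmd() below reserves
 * cmd_len == 2 for ECORE_VLAN_MAC_MOVE.
 */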
1966
1967 static inline int ecore_vlan_mac_push_new_cmd(
1968         struct bxe_softc *sc,
1969         struct ecore_vlan_mac_ramrod_params *p)
1970 {
1971         struct ecore_exeq_elem *elem;
1972         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1973         bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1974
1975         /* Allocate the execution queue element */
1976         elem = ecore_exe_queue_alloc_elem(sc);
1977         if (!elem)
1978                 return ECORE_NOMEM;
1979
1980         /* Set the command 'length' */
1981         switch (p->user_req.cmd) {
1982         case ECORE_VLAN_MAC_MOVE:
1983                 elem->cmd_len = 2;
1984                 break;
1985         default:
1986                 elem->cmd_len = 1;
1987         }
1988
1989         /* Fill the object specific info */
1990         ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1991
1992         /* Try to add a new command to the pending list */
1993         return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1994 }
1995
1996 /**
1997  * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1998  *
1999  * @sc:   device handle
2000  * @p:    vlan_mac ramrod parameters
2001  *
2002  */
2003 int ecore_config_vlan_mac(struct bxe_softc *sc,
2004                            struct ecore_vlan_mac_ramrod_params *p)
2005 {
2006         int rc = ECORE_SUCCESS;
2007         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
2008         unsigned long *ramrod_flags = &p->ramrod_flags;
2009         bool cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
2010         struct ecore_raw_obj *raw = &o->raw;
2011
2012         /*
2013          * Add new elements to the execution list for commands that require it.
2014          */
2015         if (!cont) {
2016                 rc = ecore_vlan_mac_push_new_cmd(sc, p);
2017                 if (rc)
2018                         return rc;
2019         }
2020
2021         /* If nothing will be executed further in this iteration we want to
2022          * return PENDING if there are pending commands
2023          */
2024         if (!ecore_exe_queue_empty(&o->exe_queue))
2025                 rc = ECORE_PENDING;
2026
2027         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
2028                 ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
2029                 raw->clear_pending(raw);
2030         }
2031
2032         /* Execute commands if required */
2033         if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
2034             ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
2035                 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
2036                                                    &p->ramrod_flags);
2037                 if (rc < 0)
2038                         return rc;
2039         }
2040
2041         /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
2042  * then the user wants to wait until the last command is done.
2043          */
2044         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2045                 /* Wait maximum for the current exe_queue length iterations plus
2046                  * one (for the current pending command).
2047                  */
2048                 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
2049
2050                 while (!ecore_exe_queue_empty(&o->exe_queue) &&
2051                        max_iterations--) {
2052
2053                         /* Wait for the current command to complete */
2054                         rc = raw->wait_comp(sc, raw);
2055                         if (rc)
2056                                 return rc;
2057
2058                         /* Make a next step */
2059                         rc = __ecore_vlan_mac_execute_step(sc,
2060                                                            p->vlan_mac_obj,
2061                                                            &p->ramrod_flags);
2062                         if (rc < 0)
2063                                 return rc;
2064                 }
2065
2066                 return ECORE_SUCCESS;
2067         }
2068
2069         return rc;
2070 }
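
/* Usage sketch (editorial, not part of the upstream sources): a typical
 * caller builds the ramrod parameters, picks a command and blocks until
 * completion. The helper below is hypothetical but mirrors how the driver's
 * MAC-setup paths use this API.
 *
 *      static int example_set_mac(struct bxe_softc *sc,
 *                                 struct ecore_vlan_mac_obj *o,
 *                                 uint8_t *mac, bool add)
 *      {
 *              struct ecore_vlan_mac_ramrod_params p;
 *
 *              ECORE_MEMSET(&p, 0, sizeof(p));
 *              p.vlan_mac_obj = o;
 *              p.user_req.cmd = add ? ECORE_VLAN_MAC_ADD : ECORE_VLAN_MAC_DEL;
 *              ECORE_MEMCPY(p.user_req.u.mac.mac, mac, ETH_ALEN);
 *
 *              ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *              return ecore_config_vlan_mac(sc, &p);
 *      }
 */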
2071
2072 /**
2073  * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2074  *
2075  * @sc:                 device handle
2076  * @o:                  vlan_mac object to delete entries from
2077  * @vlan_mac_flags:     flags identifying the elements to delete
2078  * @ramrod_flags:       execution flags to be used for this deletion
2079  *
2080  * Returns 0 if the last operation has completed successfully and there are
2081  * no more elements left, a positive value if the last operation has
2082  * completed successfully and there are more previously configured elements,
2083  * and a negative value if the current operation has failed.
2084  */
2085 static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
2086                                   struct ecore_vlan_mac_obj *o,
2087                                   unsigned long *vlan_mac_flags,
2088                                   unsigned long *ramrod_flags)
2089 {
2090         struct ecore_vlan_mac_registry_elem *pos = NULL;
2091         struct ecore_vlan_mac_ramrod_params p;
2092         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
2093         struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
2094         int read_lock;
2095         int rc = 0;
2096
2097         /* Clear pending commands first */
2098
2099         ECORE_SPIN_LOCK_BH(&exeq->lock);
2100
2101         ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
2102                                        &exeq->exe_queue, link,
2103                                        struct ecore_exeq_elem) {
2104                 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
2105                     *vlan_mac_flags) {
2106                         rc = exeq->remove(sc, exeq->owner, exeq_pos);
2107                         if (rc) {
2108                                 ECORE_ERR("Failed to remove command\n");
2109                                 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2110                                 return rc;
2111                         }
2112                         ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
2113                                                 &exeq->exe_queue);
2114                         ecore_exe_queue_free_elem(sc, exeq_pos);
2115                 }
2116         }
2117
2118         ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2119
2120         /* Prepare a command request */
2121         ECORE_MEMSET(&p, 0, sizeof(p));
2122         p.vlan_mac_obj = o;
2123         p.ramrod_flags = *ramrod_flags;
2124         p.user_req.cmd = ECORE_VLAN_MAC_DEL;
2125
2126         /* Add all but the last VLAN-MAC to the execution queue without
2127          * actually executing anything.
2128          */
2129         ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
2130         ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
2131         ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2132
2133         ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2134         read_lock = ecore_vlan_mac_h_read_lock(sc, o);
2135         if (read_lock != ECORE_SUCCESS)
2136                 return read_lock;
2137
2138         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
2139                                   struct ecore_vlan_mac_registry_elem) {
2140                 if (pos->vlan_mac_flags == *vlan_mac_flags) {
2141                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2142                         ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
2143                         rc = ecore_config_vlan_mac(sc, &p);
2144                         if (rc < 0) {
2145                                 ECORE_ERR("Failed to add a new DEL command\n");
2146                                 ecore_vlan_mac_h_read_unlock(sc, o);
2147                                 return rc;
2148                         }
2149                 }
2150         }
2151
2152         ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2153         ecore_vlan_mac_h_read_unlock(sc, o);
2154
2155         p.ramrod_flags = *ramrod_flags;
2156         ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2157
2158         return ecore_config_vlan_mac(sc, &p);
2159 }
2160
2161 static inline void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
2162         uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping, int state,
2163         unsigned long *pstate, ecore_obj_type type)
2164 {
2165         raw->func_id = func_id;
2166         raw->cid = cid;
2167         raw->cl_id = cl_id;
2168         raw->rdata = rdata;
2169         raw->rdata_mapping = rdata_mapping;
2170         raw->state = state;
2171         raw->pstate = pstate;
2172         raw->obj_type = type;
2173         raw->check_pending = ecore_raw_check_pending;
2174         raw->clear_pending = ecore_raw_clear_pending;
2175         raw->set_pending = ecore_raw_set_pending;
2176         raw->wait_comp = ecore_raw_wait;
2177 }
2178
2179 static inline void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
2180         uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping,
2181         int state, unsigned long *pstate, ecore_obj_type type,
2182         struct ecore_credit_pool_obj *macs_pool,
2183         struct ecore_credit_pool_obj *vlans_pool)
2184 {
2185         ECORE_LIST_INIT(&o->head);
2186         o->head_reader = 0;
2187         o->head_exe_request = FALSE;
2188         o->saved_ramrod_flags = 0;
2189
2190         o->macs_pool = macs_pool;
2191         o->vlans_pool = vlans_pool;
2192
2193         o->delete_all = ecore_vlan_mac_del_all;
2194         o->restore = ecore_vlan_mac_restore;
2195         o->complete = ecore_complete_vlan_mac;
2196         o->wait = ecore_wait_vlan_mac;
2197
2198         ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2199                            state, pstate, type);
2200 }
2201
2202 void ecore_init_mac_obj(struct bxe_softc *sc,
2203                         struct ecore_vlan_mac_obj *mac_obj,
2204                         uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2205                         ecore_dma_addr_t rdata_mapping, int state,
2206                         unsigned long *pstate, ecore_obj_type type,
2207                         struct ecore_credit_pool_obj *macs_pool)
2208 {
2209         union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
2210
2211         ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2212                                    rdata_mapping, state, pstate, type,
2213                                    macs_pool, NULL);
2214
2215         /* CAM credit pool handling */
2216         mac_obj->get_credit = ecore_get_credit_mac;
2217         mac_obj->put_credit = ecore_put_credit_mac;
2218         mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2219         mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2220
2221         if (CHIP_IS_E1x(sc)) {
2222                 mac_obj->set_one_rule      = ecore_set_one_mac_e1x;
2223                 mac_obj->check_del         = ecore_check_mac_del;
2224                 mac_obj->check_add         = ecore_check_mac_add;
2225                 mac_obj->check_move        = ecore_check_move_always_err;
2226                 mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2227
2228                 /* Exe Queue */
2229                 ecore_exe_queue_init(sc,
2230                                      &mac_obj->exe_queue, 1, qable_obj,
2231                                      ecore_validate_vlan_mac,
2232                                      ecore_remove_vlan_mac,
2233                                      ecore_optimize_vlan_mac,
2234                                      ecore_execute_vlan_mac,
2235                                      ecore_exeq_get_mac);
2236         } else {
2237                 mac_obj->set_one_rule      = ecore_set_one_mac_e2;
2238                 mac_obj->check_del         = ecore_check_mac_del;
2239                 mac_obj->check_add         = ecore_check_mac_add;
2240                 mac_obj->check_move        = ecore_check_move;
2241                 mac_obj->ramrod_cmd        =
2242                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2243                 mac_obj->get_n_elements    = ecore_get_n_elements;
2244
2245                 /* Exe Queue */
2246                 ecore_exe_queue_init(sc,
2247                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2248                                      qable_obj, ecore_validate_vlan_mac,
2249                                      ecore_remove_vlan_mac,
2250                                      ecore_optimize_vlan_mac,
2251                                      ecore_execute_vlan_mac,
2252                                      ecore_exeq_get_mac);
2253         }
2254 }
2255
2256 void ecore_init_vlan_obj(struct bxe_softc *sc,
2257                          struct ecore_vlan_mac_obj *vlan_obj,
2258                          uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2259                          ecore_dma_addr_t rdata_mapping, int state,
2260                          unsigned long *pstate, ecore_obj_type type,
2261                          struct ecore_credit_pool_obj *vlans_pool)
2262 {
2263         union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj;
2264
2265         ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2266                                    rdata_mapping, state, pstate, type, NULL,
2267                                    vlans_pool);
2268
2269         vlan_obj->get_credit = ecore_get_credit_vlan;
2270         vlan_obj->put_credit = ecore_put_credit_vlan;
2271         vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan;
2272         vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan;
2273
2274         if (CHIP_IS_E1x(sc)) {
2275                 ECORE_ERR("Do not support chips others than E2 and newer\n");
2276                 ECORE_BUG();
2277         } else {
2278                 vlan_obj->set_one_rule      = ecore_set_one_vlan_e2;
2279                 vlan_obj->check_del         = ecore_check_vlan_del;
2280                 vlan_obj->check_add         = ecore_check_vlan_add;
2281                 vlan_obj->check_move        = ecore_check_move;
2282                 vlan_obj->ramrod_cmd        =
2283                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2284                 vlan_obj->get_n_elements    = ecore_get_n_elements;
2285
2286                 /* Exe Queue */
2287                 ecore_exe_queue_init(sc,
2288                                      &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2289                                      qable_obj, ecore_validate_vlan_mac,
2290                                      ecore_remove_vlan_mac,
2291                                      ecore_optimize_vlan_mac,
2292                                      ecore_execute_vlan_mac,
2293                                      ecore_exeq_get_vlan);
2294         }
2295 }
2296
2297 void ecore_init_vlan_mac_obj(struct bxe_softc *sc,
2298                              struct ecore_vlan_mac_obj *vlan_mac_obj,
2299                              uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2300                              ecore_dma_addr_t rdata_mapping, int state,
2301                              unsigned long *pstate, ecore_obj_type type,
2302                              struct ecore_credit_pool_obj *macs_pool,
2303                              struct ecore_credit_pool_obj *vlans_pool)
2304 {
2305         union ecore_qable_obj *qable_obj =
2306                 (union ecore_qable_obj *)vlan_mac_obj;
2307
2308         ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2309                                    rdata_mapping, state, pstate, type,
2310                                    macs_pool, vlans_pool);
2311
2312         /* CAM pool handling */
2313         vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
2314         vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2315         /* CAM offset is relevant for 57710 and 57711 chips only which have a
2316          * single CAM for both MACs and VLAN-MAC pairs. So the offset
2317          * will be taken from MACs' pool object only.
2318          */
2319         vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2320         vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2321
2322         if (CHIP_IS_E1(sc)) {
2323                 ECORE_ERR("Do not support chips others than E2\n");
2324                 ECORE_BUG();
2325         } else if (CHIP_IS_E1H(sc)) {
2326                 vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e1h;
2327                 vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2328                 vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2329                 vlan_mac_obj->check_move        = ecore_check_move_always_err;
2330                 vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2331
2332                 /* Exe Queue */
2333                 ecore_exe_queue_init(sc,
2334                                      &vlan_mac_obj->exe_queue, 1, qable_obj,
2335                                      ecore_validate_vlan_mac,
2336                                      ecore_remove_vlan_mac,
2337                                      ecore_optimize_vlan_mac,
2338                                      ecore_execute_vlan_mac,
2339                                      ecore_exeq_get_vlan_mac);
2340         } else {
2341                 vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e2;
2342                 vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2343                 vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2344                 vlan_mac_obj->check_move        = ecore_check_move;
2345                 vlan_mac_obj->ramrod_cmd        =
2346                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2347
2348                 /* Exe Queue */
2349                 ecore_exe_queue_init(sc,
2350                                      &vlan_mac_obj->exe_queue,
2351                                      CLASSIFY_RULES_COUNT,
2352                                      qable_obj, ecore_validate_vlan_mac,
2353                                      ecore_remove_vlan_mac,
2354                                      ecore_optimize_vlan_mac,
2355                                      ecore_execute_vlan_mac,
2356                                      ecore_exeq_get_vlan_mac);
2357         }
2358 }
2359
2360 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2361 static inline void __storm_memset_mac_filters(struct bxe_softc *sc,
2362                         struct tstorm_eth_mac_filter_config *mac_filters,
2363                         uint16_t pf_id)
2364 {
2365         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2366
2367         uint32_t addr = BAR_TSTRORM_INTMEM +
2368                         TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2369
2370         ecore_storm_memset_struct(sc, addr, size, (uint32_t *)mac_filters);
2371 }
2372
2373 static int ecore_set_rx_mode_e1x(struct bxe_softc *sc,
2374                                  struct ecore_rx_mode_ramrod_params *p)
2375 {
2376         /* update the sc MAC filter structure */
2377         uint32_t mask = (1 << p->cl_id);
2378
2379         struct tstorm_eth_mac_filter_config *mac_filters =
2380                 (struct tstorm_eth_mac_filter_config *)p->rdata;
2381
2382         /* initial setting is drop-all */
2383         uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
2384         uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2385         uint8_t unmatched_unicast = 0;
2386
2387         /* In E1x we only take the rx accept flags into account,
2388          * since tx switching isn't enabled. */
2389         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
2390                 /* accept matched ucast */
2391                 drop_all_ucast = 0;
2392
2393         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
2394                 /* accept matched mcast */
2395                 drop_all_mcast = 0;
2396
2397         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2398                 /* accept all ucast */
2399                 drop_all_ucast = 0;
2400                 accp_all_ucast = 1;
2401         }
2402         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2403                 /* accept all mcast */
2404                 drop_all_mcast = 0;
2405                 accp_all_mcast = 1;
2406         }
2407         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
2408                 /* accept (all) bcast */
2409                 accp_all_bcast = 1;
2410         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2411                 /* accept unmatched unicasts */
2412                 unmatched_unicast = 1;
2413
2414         mac_filters->ucast_drop_all = drop_all_ucast ?
2415                 mac_filters->ucast_drop_all | mask :
2416                 mac_filters->ucast_drop_all & ~mask;
2417
2418         mac_filters->mcast_drop_all = drop_all_mcast ?
2419                 mac_filters->mcast_drop_all | mask :
2420                 mac_filters->mcast_drop_all & ~mask;
2421
2422         mac_filters->ucast_accept_all = accp_all_ucast ?
2423                 mac_filters->ucast_accept_all | mask :
2424                 mac_filters->ucast_accept_all & ~mask;
2425
2426         mac_filters->mcast_accept_all = accp_all_mcast ?
2427                 mac_filters->mcast_accept_all | mask :
2428                 mac_filters->mcast_accept_all & ~mask;
2429
2430         mac_filters->bcast_accept_all = accp_all_bcast ?
2431                 mac_filters->bcast_accept_all | mask :
2432                 mac_filters->bcast_accept_all & ~mask;
2433
2434         mac_filters->unmatched_unicast = unmatched_unicast ?
2435                 mac_filters->unmatched_unicast | mask :
2436                 mac_filters->unmatched_unicast & ~mask;
2437
2438         ECORE_MSG(sc, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2439                          "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2440            mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2441            mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2442            mac_filters->bcast_accept_all);
2443
2444         /* write the MAC filter structure*/
2445         __storm_memset_mac_filters(sc, mac_filters, p->func_id);
2446
2447         /* The operation is completed */
2448         ECORE_CLEAR_BIT(p->state, p->pstate);
2449         ECORE_SMP_MB_AFTER_CLEAR_BIT();
2450
2451         return ECORE_SUCCESS;
2452 }
2453
2454 /* Setup ramrod data */
2455 static inline void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid,
2456                                 struct eth_classify_header *hdr,
2457                                 uint8_t rule_cnt)
2458 {
2459         hdr->echo = ECORE_CPU_TO_LE32(cid);
2460         hdr->rule_cnt = rule_cnt;
2461 }
2462
2463 static inline void ecore_rx_mode_set_cmd_state_e2(struct bxe_softc *sc,
2464                                 unsigned long *accept_flags,
2465                                 struct eth_filter_rules_cmd *cmd,
2466                                 bool clear_accept_all)
2467 {
2468         uint16_t state;
2469
2470         /* start with 'drop-all' */
2471         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2472                 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2473
2474         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2475                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2476
2477         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2478                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2479
2480         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2481                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2482                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2483         }
2484
2485         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2486                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2487                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2488         }
2489         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2490                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2491
2492         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2493                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2494                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2495         }
2496         if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2497                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2498
2499         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2500         if (clear_accept_all) {
2501                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2502                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2503                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2504                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2505         }
2506
2507         cmd->state = ECORE_CPU_TO_LE16(state);
2508 }
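
/* Worked example (editorial, not in the upstream sources): with
 * ECORE_ACCEPT_UNICAST and ECORE_ACCEPT_BROADCAST set in *accept_flags and
 * clear_accept_all == FALSE, the computed state is
 *
 *      ETH_FILTER_RULES_CMD_MCAST_DROP_ALL |
 *      ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL
 *
 * i.e. matched unicast passes (UCAST_DROP_ALL was cleared), all multicast
 * is dropped and all broadcast is accepted.
 */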
2509
2510 static int ecore_set_rx_mode_e2(struct bxe_softc *sc,
2511                                 struct ecore_rx_mode_ramrod_params *p)
2512 {
2513         struct eth_filter_rules_ramrod_data *data = p->rdata;
2514         int rc;
2515         uint8_t rule_idx = 0;
2516
2517         /* Reset the ramrod data buffer */
2518         ECORE_MEMSET(data, 0, sizeof(*data));
2519
2520         /* Setup ramrod data */
2521
2522         /* Tx (internal switching) */
2523         if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2524                 data->rules[rule_idx].client_id = p->cl_id;
2525                 data->rules[rule_idx].func_id = p->func_id;
2526
2527                 data->rules[rule_idx].cmd_general_data =
2528                         ETH_FILTER_RULES_CMD_TX_CMD;
2529
2530                 ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
2531                                                &(data->rules[rule_idx++]),
2532                                                FALSE);
2533         }
2534
2535         /* Rx */
2536         if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2537                 data->rules[rule_idx].client_id = p->cl_id;
2538                 data->rules[rule_idx].func_id = p->func_id;
2539
2540                 data->rules[rule_idx].cmd_general_data =
2541                         ETH_FILTER_RULES_CMD_RX_CMD;
2542
2543                 ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
2544                                                &(data->rules[rule_idx++]),
2545                                                FALSE);
2546         }
2547
2548         /* If FCoE Queue configuration has been requested, configure the Rx
2549          * and internal switching modes for this queue in separate rules.
2550          *
2551          * The FCoE queue shall never be set to ACCEPT_ALL packets of any
2552          * sort: MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2553          */
2554         if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2555                 /*  Tx (internal switching) */
2556                 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2557                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2558                         data->rules[rule_idx].func_id = p->func_id;
2559
2560                         data->rules[rule_idx].cmd_general_data =
2561                                                 ETH_FILTER_RULES_CMD_TX_CMD;
2562
2563                         ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
2564                                                        &(data->rules[rule_idx]),
2565                                                        TRUE);
2566                         rule_idx++;
2567                 }
2568
2569                 /* Rx */
2570                 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2571                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2572                         data->rules[rule_idx].func_id = p->func_id;
2573
2574                         data->rules[rule_idx].cmd_general_data =
2575                                                 ETH_FILTER_RULES_CMD_RX_CMD;
2576
2577                         ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
2578                                                        &(data->rules[rule_idx]),
2579                                                        TRUE);
2580                         rule_idx++;
2581                 }
2582         }
2583
2584         /* Set the ramrod header (most importantly - number of rules to
2585          * configure).
2586          */
2587         ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2588
2589         ECORE_MSG(sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2590                   data->header.rule_cnt, p->rx_accept_flags,
2591                   p->tx_accept_flags);
2592
2593         /* No need for an explicit memory barrier here: we need to
2594          * ensure the ordering of writing to the SPQ element and
2595          * updating of the SPQ producer, which involves a memory
2596          * read, and a full memory barrier is already placed there
2597          * (inside ecore_sp_post()).
2598          */
2599
2600         /* Send a ramrod */
2601         rc = ecore_sp_post(sc,
2602                            RAMROD_CMD_ID_ETH_FILTER_RULES,
2603                            p->cid,
2604                            p->rdata_mapping,
2605                            ETH_CONNECTION_TYPE);
2606         if (rc)
2607                 return rc;
2608
2609         /* Ramrod completion is pending */
2610         return ECORE_PENDING;
2611 }
2612
2613 static int ecore_wait_rx_mode_comp_e2(struct bxe_softc *sc,
2614                                       struct ecore_rx_mode_ramrod_params *p)
2615 {
2616         return ecore_state_wait(sc, p->state, p->pstate);
2617 }
2618
2619 static int ecore_empty_rx_mode_wait(struct bxe_softc *sc,
2620                                     struct ecore_rx_mode_ramrod_params *p)
2621 {
2622         /* Do nothing */
2623         return ECORE_SUCCESS;
2624 }
2625
2626 int ecore_config_rx_mode(struct bxe_softc *sc,
2627                          struct ecore_rx_mode_ramrod_params *p)
2628 {
2629         int rc;
2630
2631         /* Configure the new classification in the chip */
2632         rc = p->rx_mode_obj->config_rx_mode(sc, p);
2633         if (rc < 0)
2634                 return rc;
2635
2636         /* Wait for a ramrod completion if was requested */
2637         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2638                 rc = p->rx_mode_obj->wait_comp(sc, p);
2639                 if (rc)
2640                         return rc;
2641         }
2642
2643         return rc;
2644 }
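
/* Usage sketch (editorial, not part of the upstream sources): the caller is
 * expected to have filled the rx-mode ramrod parameters (cid, cl_id, func_id,
 * rdata and the state bookkeeping fields) beforehand; selecting the filtering
 * policy then looks roughly like
 *
 *      ECORE_SET_BIT_NA(ECORE_ACCEPT_UNICAST,   &p.rx_accept_flags);
 *      ECORE_SET_BIT_NA(ECORE_ACCEPT_MULTICAST, &p.rx_accept_flags);
 *      ECORE_SET_BIT_NA(ECORE_ACCEPT_BROADCAST, &p.rx_accept_flags);
 *      ECORE_SET_BIT_NA(RAMROD_RX, &p.ramrod_flags);
 *      ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *      rc = ecore_config_rx_mode(sc, &p);
 */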
2645
2646 void ecore_init_rx_mode_obj(struct bxe_softc *sc,
2647                             struct ecore_rx_mode_obj *o)
2648 {
2649         if (CHIP_IS_E1x(sc)) {
2650                 o->wait_comp      = ecore_empty_rx_mode_wait;
2651                 o->config_rx_mode = ecore_set_rx_mode_e1x;
2652         } else {
2653                 o->wait_comp      = ecore_wait_rx_mode_comp_e2;
2654                 o->config_rx_mode = ecore_set_rx_mode_e2;
2655         }
2656 }
2657
2658 /********************* Multicast verbs: SET, CLEAR ****************************/
2659 static inline uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
2660 {
2661         return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2662 }
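
/* Editorial note (not in the upstream sources): the bin is the top byte of
 * the little-endian CRC32 of the MAC, i.e. a value in 0..255 selecting one
 * bit of the approximate-match vector; with 64-bit vector elements the
 * membership test is effectively
 *
 *      vec[bin / BIT_VEC64_ELEM_SZ] & (1ULL << (bin % BIT_VEC64_ELEM_SZ))
 */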
2663
2664 struct ecore_mcast_mac_elem {
2665         ecore_list_entry_t link;
2666         uint8_t mac[ETH_ALEN];
2667         uint8_t pad[2]; /* For a natural alignment of the following buffer */
2668 };
2669
2670 struct ecore_pending_mcast_cmd {
2671         ecore_list_entry_t link;
2672         int type; /* ECORE_MCAST_CMD_X */
2673         union {
2674                 ecore_list_t macs_head;
2675                 uint32_t macs_num; /* Needed for DEL command */
2676                 int next_bin; /* Needed for RESTORE flow with approx match */
2677         } data;
2678
2679         bool done; /* set to TRUE when the command has been handled,
2680                     * practically used in 57712 handling only, where one pending
2681                     * command may be handled in a few operations. As long as for
2682                     * other chips every operation handling is completed in a
2683                     * single ramrod, there is no need to utilize this field.
2684                     */
2685 };
2686
2687 static int ecore_mcast_wait(struct bxe_softc *sc,
2688                             struct ecore_mcast_obj *o)
2689 {
2690         if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2691                         o->raw.wait_comp(sc, &o->raw))
2692                 return ECORE_TIMEOUT;
2693
2694         return ECORE_SUCCESS;
2695 }
2696
2697 static int ecore_mcast_enqueue_cmd(struct bxe_softc *sc,
2698                                    struct ecore_mcast_obj *o,
2699                                    struct ecore_mcast_ramrod_params *p,
2700                                    enum ecore_mcast_cmd cmd)
2701 {
2702         int total_sz;
2703         struct ecore_pending_mcast_cmd *new_cmd;
2704         struct ecore_mcast_mac_elem *cur_mac = NULL;
2705         struct ecore_mcast_list_elem *pos;
2706         int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2707                              p->mcast_list_len : 0);
2708
2709         /* If the command is empty ("handle pending commands only"), return */
2710         if (!p->mcast_list_len)
2711                 return ECORE_SUCCESS;
2712
2713         total_sz = sizeof(*new_cmd) +
2714                 macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2715
2716         /* Add mcast is called under a spin_lock, hence allocating with GFP_ATOMIC */
2717         new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2718
2719         if (!new_cmd)
2720                 return ECORE_NOMEM;
2721
2722         ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d\n",
2723                   cmd, macs_list_len);
2724
2725         ECORE_LIST_INIT(&new_cmd->data.macs_head);
2726
2727         new_cmd->type = cmd;
2728         new_cmd->done = FALSE;
             new_cmd->alloc_len = total_sz; /* recorded for ECORE_FREE() */
2729
2730         switch (cmd) {
2731         case ECORE_MCAST_CMD_ADD:
2732                 cur_mac = (struct ecore_mcast_mac_elem *)
2733                           ((uint8_t *)new_cmd + sizeof(*new_cmd));
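                     /* The MAC elements were carved out of the same
                      * allocation, immediately after the command structure
                      * itself (see the total_sz computation above).
                      */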
2734
2735                 /* Push the MACs of the current command into the pending command
2736                  * MACs list: FIFO
2737                  */
2738                 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2739                                           struct ecore_mcast_list_elem) {
2740                         ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2741                         ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2742                                              &new_cmd->data.macs_head);
2743                         cur_mac++;
2744                 }
2745
2746                 break;
2747
2748         case ECORE_MCAST_CMD_DEL:
2749                 new_cmd->data.macs_num = p->mcast_list_len;
2750                 break;
2751
2752         case ECORE_MCAST_CMD_RESTORE:
2753                 new_cmd->data.next_bin = 0;
2754                 break;
2755
2756         default:
2757                 ECORE_FREE(sc, new_cmd, total_sz);
2758                 ECORE_ERR("Unknown command: %d\n", cmd);
2759                 return ECORE_INVAL;
2760         }
2761
2762         /* Push the new pending command to the tail of the pending list: FIFO */
2763         ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2764
2765         o->set_sched(o);
2766
2767         return ECORE_PENDING;
2768 }
2769
2770 /**
2771  * ecore_mcast_get_next_bin - get the next set bin (index)
2772  *
2773  * @o:          multicast object
2774  * @last:       index to start looking from (inclusive)
2775  *
2776  * returns the next found (set) bin or a negative value if none is found.
2777  */
2778 static inline int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2779 {
2780         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2781
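             /* The registry is a vector of 64-bit words: skip words that are
              * all zero and scan the remaining ones bit by bit, resuming
              * inside the first word at bit 'last % BIT_VEC64_ELEM_SZ'.
              */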
2782         for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2783                 if (o->registry.aprox_match.vec[i])
2784                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2785                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2786                                 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2787                                                        vec, cur_bit)) {
2788                                         return cur_bit;
2789                                 }
2790                         }
2791                 inner_start = 0;
2792         }
2793
2794         /* None found */
2795         return -1;
2796 }
2797
2798 /**
2799  * ecore_mcast_clear_first_bin - find the first set bin and clear it
2800  *
2801  * @o:          multicast object
2802  *
2803  * returns the index of the found bin or -1 if none is found
2804  */
2805 static inline int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2806 {
2807         int cur_bit = ecore_mcast_get_next_bin(o, 0);
2808
2809         if (cur_bit >= 0)
2810                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2811
2812         return cur_bit;
2813 }
2814
2815 static inline uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2816 {
2817         struct ecore_raw_obj *raw = &o->raw;
2818         uint8_t rx_tx_flag = 0;
2819
2820         if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2821             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2822                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2823
2824         if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2825             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2826                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2827
2828         return rx_tx_flag;
2829 }
2830
2831 static void ecore_mcast_set_one_rule_e2(struct bxe_softc *sc,
2832                                         struct ecore_mcast_obj *o, int idx,
2833                                         union ecore_mcast_config_data *cfg_data,
2834                                         enum ecore_mcast_cmd cmd)
2835 {
2836         struct ecore_raw_obj *r = &o->raw;
2837         struct eth_multicast_rules_ramrod_data *data =
2838                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2839         uint8_t func_id = r->func_id;
2840         uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2841         int bin;
2842
2843         if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2844                 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2845
2846         data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2847
2848         /* Get a bin and update the bins vector */
2849         switch (cmd) {
2850         case ECORE_MCAST_CMD_ADD:
2851                 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2852                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2853                 break;
2854
2855         case ECORE_MCAST_CMD_DEL:
2856                 /* If there were no more bins to clear
2857                  * (ecore_mcast_clear_first_bin() returns -1) then we would
2858                  * clear any (0xff) bin.
2859                  * See ecore_mcast_validate_e2() for an explanation of when
2860                  * this may happen.
2861                  */
2862                 bin = ecore_mcast_clear_first_bin(o);
2863                 break;
2864
2865         case ECORE_MCAST_CMD_RESTORE:
2866                 bin = cfg_data->bin;
2867                 break;
2868
2869         default:
2870                 ECORE_ERR("Unknown command: %d\n", cmd);
2871                 return;
2872         }
2873
2874         ECORE_MSG(sc, "%s bin %d\n",
2875                   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2876                    "Setting"  : "Clearing"), bin);
2877
2878         data->rules[idx].bin_id    = (uint8_t)bin;
2879         data->rules[idx].func_id   = func_id;
2880         data->rules[idx].engine_id = o->engine_id;
2881 }
2882
2883 /**
2884  * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2885  *
2886  * @sc:         device handle
2887  * @o:          multicast object
2888  * @start_bin:  index in the registry to start from (inclusive)
2889  * @rdata_idx:  index in the ramrod data to start from
2890  *
2891  * returns last handled bin index or -1 if all bins have been handled
2892  */
2893 static inline int ecore_mcast_handle_restore_cmd_e2(
2894         struct bxe_softc *sc, struct ecore_mcast_obj *o, int start_bin,
2895         int *rdata_idx)
2896 {
2897         int cur_bin, cnt = *rdata_idx;
2898         union ecore_mcast_config_data cfg_data = {NULL};
2899
2900         /* go through the registry and configure the bins from it */
2901         for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2902             cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2903
2904                 cfg_data.bin = (uint8_t)cur_bin;
2905                 o->set_one_rule(sc, o, cnt, &cfg_data,
2906                                 ECORE_MCAST_CMD_RESTORE);
2907
2908                 cnt++;
2909
2910                 ECORE_MSG(sc, "About to configure a bin %d\n", cur_bin);
2911
2912                 /* Break if we reached the maximum number
2913                  * of rules.
2914                  */
2915                 if (cnt >= o->max_cmd_len)
2916                         break;
2917         }
2918
2919         *rdata_idx = cnt;
2920
2921         return cur_bin;
2922 }
2923
2924 static inline void ecore_mcast_hdl_pending_add_e2(struct bxe_softc *sc,
2925         struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2926         int *line_idx)
2927 {
2928         struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2929         int cnt = *line_idx;
2930         union ecore_mcast_config_data cfg_data = {NULL};
2931
2932         ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2933                 &cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) {
2934
2935                 cfg_data.mac = &pmac_pos->mac[0];
2936                 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2937
2938                 cnt++;
2939
2940                 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
2941                           pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2942
2943                 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2944                                         &cmd_pos->data.macs_head);
2945
2946                 /* Break if we reached the maximum number
2947                  * of rules.
2948                  */
2949                 if (cnt >= o->max_cmd_len)
2950                         break;
2951         }
2952
2953         *line_idx = cnt;
2954
2955         /* if no more MACs to configure - we are done */
2956         if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2957                 cmd_pos->done = TRUE;
2958 }
2959
2960 static inline void ecore_mcast_hdl_pending_del_e2(struct bxe_softc *sc,
2961         struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2962         int *line_idx)
2963 {
2964         int cnt = *line_idx;
2965
2966         while (cmd_pos->data.macs_num) {
2967                 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2968
2969                 cnt++;
2970
2971                 cmd_pos->data.macs_num--;
2972
2973                 ECORE_MSG(sc, "Deleting MAC. %d left, cnt is %d\n",
2974                           cmd_pos->data.macs_num, cnt);
2975
2976                 /* Break if we reached the maximum
2977                  * number of rules.
2978                  */
2979                 if (cnt >= o->max_cmd_len)
2980                         break;
2981         }
2982
2983         *line_idx = cnt;
2984
2985         /* If we cleared all bins - we are done */
2986         if (!cmd_pos->data.macs_num)
2987                 cmd_pos->done = TRUE;
2988 }
2989
2990 static inline void ecore_mcast_hdl_pending_restore_e2(struct bxe_softc *sc,
2991         struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2992         int *line_idx)
2993 {
2994         cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2995                                                 line_idx);
2996
2997         if (cmd_pos->data.next_bin < 0)
2998                 /* If o->hdl_restore returned -1 we are done */
2999                 cmd_pos->done = TRUE;
3000         else
3001                 /* Start from the next bin next time */
3002                 cmd_pos->data.next_bin++;
3003 }
3004
3005 static inline int ecore_mcast_handle_pending_cmds_e2(struct bxe_softc *sc,
3006                                 struct ecore_mcast_ramrod_params *p)
3007 {
3008         struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
3009         int cnt = 0;
3010         struct ecore_mcast_obj *o = p->mcast_obj;
3011
3012         ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
3013                 &o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) {
3014                 switch (cmd_pos->type) {
3015                 case ECORE_MCAST_CMD_ADD:
3016                         ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
3017                         break;
3018
3019                 case ECORE_MCAST_CMD_DEL:
3020                         ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
3021                         break;
3022
3023                 case ECORE_MCAST_CMD_RESTORE:
3024                         ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
3025                                                            &cnt);
3026                         break;
3027
3028                 default:
3029                         ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3030                         return ECORE_INVAL;
3031                 }
3032
3033                 /* If the command has been completed - remove it from the list
3034                  * and free the memory
3035                  */
3036                 if (cmd_pos->done) {
3037                         ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
3038                                                 &o->pending_cmds_head);
3039                         ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3040                 }
3041
3042                 /* Break if we reached the maximum number of rules */
3043                 if (cnt >= o->max_cmd_len)
3044                         break;
3045         }
3046
3047         return cnt;
3048 }
3049
3050 static inline void ecore_mcast_hdl_add(struct bxe_softc *sc,
3051         struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3052         int *line_idx)
3053 {
3054         struct ecore_mcast_list_elem *mlist_pos;
3055         union ecore_mcast_config_data cfg_data = {NULL};
3056         int cnt = *line_idx;
3057
3058         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3059                                   struct ecore_mcast_list_elem) {
3060                 cfg_data.mac = mlist_pos->mac;
3061                 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
3062
3063                 cnt++;
3064
3065                 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3066                           mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
3067         }
3068
3069         *line_idx = cnt;
3070 }
3071
3072 static inline void ecore_mcast_hdl_del(struct bxe_softc *sc,
3073         struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3074         int *line_idx)
3075 {
3076         int cnt = *line_idx, i;
3077
3078         for (i = 0; i < p->mcast_list_len; i++) {
3079                 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
3080
3081                 cnt++;
3082
3083                 ECORE_MSG(sc, "Deleting MAC. %d left\n",
3084                           p->mcast_list_len - i - 1);
3085         }
3086
3087         *line_idx = cnt;
3088 }
3089
3090 /**
3091  * ecore_mcast_handle_current_cmd - handle the current (not pending) command
3092  *
3093  * @sc:         device handle
3094  * @p:          multicast ramrod parameters
3095  * @cmd:        command to handle
3096  * @start_cnt:  first line in the ramrod data that may be used
3097  *
3098  * This function is called iff there is enough room for the current command in
3099  * the ramrod data.
3100  * Returns the total number of lines filled in the ramrod data.
3101  */
3102 static inline int ecore_mcast_handle_current_cmd(struct bxe_softc *sc,
3103                         struct ecore_mcast_ramrod_params *p,
3104                         enum ecore_mcast_cmd cmd,
3105                         int start_cnt)
3106 {
3107         struct ecore_mcast_obj *o = p->mcast_obj;
3108         int cnt = start_cnt;
3109
3110         ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3111
3112         switch (cmd) {
3113         case ECORE_MCAST_CMD_ADD:
3114                 ecore_mcast_hdl_add(sc, o, p, &cnt);
3115                 break;
3116
3117         case ECORE_MCAST_CMD_DEL:
3118                 ecore_mcast_hdl_del(sc, o, p, &cnt);
3119                 break;
3120
3121         case ECORE_MCAST_CMD_RESTORE:
3122                 o->hdl_restore(sc, o, 0, &cnt);
3123                 break;
3124
3125         default:
3126                 ECORE_ERR("Unknown command: %d\n", cmd);
3127                 return ECORE_INVAL;
3128         }
3129
3130         /* The current command has been handled */
3131         p->mcast_list_len = 0;
3132
3133         return cnt;
3134 }
3135
3136 static int ecore_mcast_validate_e2(struct bxe_softc *sc,
3137                                    struct ecore_mcast_ramrod_params *p,
3138                                    enum ecore_mcast_cmd cmd)
3139 {
3140         struct ecore_mcast_obj *o = p->mcast_obj;
3141         int reg_sz = o->get_registry_size(o);
3142
3143         switch (cmd) {
3144         /* DEL command deletes all currently configured MACs */
3145         case ECORE_MCAST_CMD_DEL:
3146                 o->set_registry_size(o, 0);
3147                 /* Don't break: intentional fall-through */
3148
3149         /* RESTORE command will restore the entire multicast configuration */
3150         case ECORE_MCAST_CMD_RESTORE:
3151                 /* Here we set the approximate amount of work to do, which
3152                  * may in fact be less: some MACs in postponed ADD
3153                  * command(s) scheduled before this command may fall into
3154                  * the same bin, so the actual number of bins set in the
3155                  * registry would be smaller than estimated here. See
3156                  * ecore_mcast_set_one_rule_e2() for further details.
3157                  */
3158                 p->mcast_list_len = reg_sz;
3159                 break;
3160
3161         case ECORE_MCAST_CMD_ADD:
3162         case ECORE_MCAST_CMD_CONT:
3163                 /* Here we assume that all new MACs will fall into new bins.
3164                  * However we will correct the real registry size after we
3165                  * handle all pending commands.
3166                  */
3167                 o->set_registry_size(o, reg_sz + p->mcast_list_len);
3168                 break;
3169
3170         default:
3171                 ECORE_ERR("Unknown command: %d\n", cmd);
3172                 return ECORE_INVAL;
3173         }
3174
3175         /* Increase the total number of MACs pending to be configured */
3176         o->total_pending_num += p->mcast_list_len;
3177
3178         return ECORE_SUCCESS;
3179 }
3180
3181 static void ecore_mcast_revert_e2(struct bxe_softc *sc,
3182                                       struct ecore_mcast_ramrod_params *p,
3183                                       int old_num_bins)
3184 {
3185         struct ecore_mcast_obj *o = p->mcast_obj;
3186
3187         o->set_registry_size(o, old_num_bins);
3188         o->total_pending_num -= p->mcast_list_len;
3189 }
3190
3191 /**
3192  * ecore_mcast_set_rdata_hdr_e2 - set the header values
3193  *
3194  * @sc:         device handle
3195  * @p:          multicast ramrod parameters
3196  * @len:        number of rules to handle
3197  */
3198 static inline void ecore_mcast_set_rdata_hdr_e2(struct bxe_softc *sc,
3199                                         struct ecore_mcast_ramrod_params *p,
3200                                         uint8_t len)
3201 {
3202         struct ecore_raw_obj *r = &p->mcast_obj->raw;
3203         struct eth_multicast_rules_ramrod_data *data =
3204                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
3205
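             /* The echo field packs the SW connection ID into the low bits
              * and the pending command type (ECORE_FILTER_MCAST_PENDING)
              * above ECORE_SWCID_SHIFT; it comes back in the ramrod
              * completion, which lets the completion be matched to this
              * command.
              */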
3206         data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3207                                         (ECORE_FILTER_MCAST_PENDING <<
3208                                          ECORE_SWCID_SHIFT));
3209         data->header.rule_cnt = len;
3210 }
3211
3212 /**
3213  * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3214  *
3215  * @sc:         device handle
3216  * @o:          multicast object
3217  *
3218  * Recalculate the actual number of set bins in the registry using Brian
3219  * Kernighan's algorithm: its execution complexity is proportional to the number of set bins.
3220  *
3221  * returns 0 for compliance with ecore_mcast_refresh_registry_e1().
3222  */
3223 static inline int ecore_mcast_refresh_registry_e2(struct bxe_softc *sc,
3224                                                   struct ecore_mcast_obj *o)
3225 {
3226         int i, cnt = 0;
3227         uint64_t elem;
3228
3229         for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
3230                 elem = o->registry.aprox_match.vec[i];
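                     /* Kernighan's trick: 'elem &= elem - 1' clears the
                      * lowest set bit, so the loop below iterates exactly
                      * once per set bit (e.g. elem = 0xb0 takes three
                      * iterations).
                      */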
3231                 for (; elem; cnt++)
3232                         elem &= elem - 1;
3233         }
3234
3235         o->set_registry_size(o, cnt);
3236
3237         return ECORE_SUCCESS;
3238 }
3239
3240 static int ecore_mcast_setup_e2(struct bxe_softc *sc,
3241                                 struct ecore_mcast_ramrod_params *p,
3242                                 enum ecore_mcast_cmd cmd)
3243 {
3244         struct ecore_raw_obj *raw = &p->mcast_obj->raw;
3245         struct ecore_mcast_obj *o = p->mcast_obj;
3246         struct eth_multicast_rules_ramrod_data *data =
3247                 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
3248         int cnt = 0, rc;
3249
3250         /* Reset the ramrod data buffer */
3251         ECORE_MEMSET(data, 0, sizeof(*data));
3252
3253         cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
3254
3255         /* If there are no more pending commands - clear SCHEDULED state */
3256         if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3257                 o->clear_sched(o);
3258
3259         /* The below may be TRUE iff there was enough room in ramrod
3260          * data for all pending commands and for the current
3261          * command. Otherwise the current command would have been added
3262          * to the pending commands and p->mcast_list_len would have been
3263          * zeroed.
3264          */
3265         if (p->mcast_list_len > 0)
3266                 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
3267
3268         /* We've pulled out some MACs - update the total number of
3269          * outstanding.
3270          */
3271         o->total_pending_num -= cnt;
3272
3273         /* send a ramrod */
3274         ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
3275         ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
3276
3277         ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t)cnt);
3278
3279         /* Update a registry size if there are no more pending operations.
3280          *
3281          * We don't want to change the value of the registry size if there are
3282          * pending operations because we want it to always be equal to the
3283          * exact or the approximate number (see ecore_mcast_validate_e2()) of
3284          * set bins after the last requested operation in order to properly
3285          * evaluate the size of the next DEL/RESTORE operation.
3286          *
3287          * Note that we update the registry itself during command(s) handling
3288          * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
3289          * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3290          * with a limited amount of update commands (per MAC/bin) and we don't
3291          * know in this scope what the actual state of bins configuration is
3292          * going to be after this ramrod.
3293          */
3294         if (!o->total_pending_num)
3295                 ecore_mcast_refresh_registry_e2(sc, o);
3296
3297         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3298          * RAMROD_PENDING status immediately.
3299          */
3300         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3301                 raw->clear_pending(raw);
3302                 return ECORE_SUCCESS;
3303         } else {
3304                 /* No need for an explicit memory barrier here: what must
3305                  * be ordered is the write to the SPQ element against the
3306                  * update of the SPQ producer, which involves a memory read,
3307                  * and a full memory barrier is already placed there
3308                  * (inside ecore_sp_post()).
3309                  */
3310
3311                 /* Send a ramrod */
3312                 rc = ecore_sp_post(sc,
3313                                    RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3314                                    raw->cid,
3315                                    raw->rdata_mapping,
3316                                    ETH_CONNECTION_TYPE);
3317                 if (rc)
3318                         return rc;
3319
3320                 /* Ramrod completion is pending */
3321                 return ECORE_PENDING;
3322         }
3323 }
3324
3325 static int ecore_mcast_validate_e1h(struct bxe_softc *sc,
3326                                     struct ecore_mcast_ramrod_params *p,
3327                                     enum ecore_mcast_cmd cmd)
3328 {
3329         /* Mark that there is work to do */
3330         if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
3331                 p->mcast_list_len = 1;
3332
3333         return ECORE_SUCCESS;
3334 }
3335
3336 static void ecore_mcast_revert_e1h(struct bxe_softc *sc,
3337                                        struct ecore_mcast_ramrod_params *p,
3338                                        int old_num_bins)
3339 {
3340         /* Do nothing */
3341 }
3342
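     /* The 57711 multicast filter is an array of 32-bit words: bit 'bit'
      * lives in word (bit >> 5), at position (bit & 0x1f) within that word.
      */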
3343 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
3344 do { \
3345         (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3346 } while (0)
3347
3348 static inline void ecore_mcast_hdl_add_e1h(struct bxe_softc *sc,
3349                                            struct ecore_mcast_obj *o,
3350                                            struct ecore_mcast_ramrod_params *p,
3351                                            uint32_t *mc_filter)
3352 {
3353         struct ecore_mcast_list_elem *mlist_pos;
3354         int bit;
3355
3356         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3357                                   struct ecore_mcast_list_elem) {
3358                 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
3359                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3360
3361                 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n",
3362                           mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit);
3363
3364                 /* bookkeeping... */
3365                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3366                                   bit);
3367         }
3368 }
3369
3370 static inline void ecore_mcast_hdl_restore_e1h(struct bxe_softc *sc,
3371         struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3372         uint32_t *mc_filter)
3373 {
3374         int bit;
3375
3376         for (bit = ecore_mcast_get_next_bin(o, 0);
3377              bit >= 0;
3378              bit = ecore_mcast_get_next_bin(o, bit + 1)) {
3379                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3380                 ECORE_MSG(sc, "About to set bin %d\n", bit);
3381         }
3382 }
3383
3384 /* On 57711 we write the multicast MACs' approximate match
3385  * table directly into the TSTORM's internal RAM, so we don't
3386  * really need any tricks to make it work.
3387  */
3388 static int ecore_mcast_setup_e1h(struct bxe_softc *sc,
3389                                  struct ecore_mcast_ramrod_params *p,
3390                                  enum ecore_mcast_cmd cmd)
3391 {
3392         int i;
3393         struct ecore_mcast_obj *o = p->mcast_obj;
3394         struct ecore_raw_obj *r = &o->raw;
3395
3396         /* Unless CLEAR_ONLY has been requested, build the multicast filter
3397          * and write it into the chip; otherwise just clear the registry.
3398          */
3399         if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3400                 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = {0};
3401
3402                 /* Set the multicast filter bits before writing it into
3403                  * the internal memory.
3404                  */
3405                 switch (cmd) {
3406                 case ECORE_MCAST_CMD_ADD:
3407                         ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
3408                         break;
3409
3410                 case ECORE_MCAST_CMD_DEL:
3411                         ECORE_MSG(sc,
3412                                   "Invalidating multicast MACs configuration\n");
3413
3414                         /* clear the registry */
3415                         ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3416                                sizeof(o->registry.aprox_match.vec));
3417                         break;
3418
3419                 case ECORE_MCAST_CMD_RESTORE:
3420                         ecore_mcast_hdl_restore_e1h(sc, o, p, mc_filter);
3421                         break;
3422
3423                 default:
3424                         ECORE_ERR("Unknown command: %d\n", cmd);
3425                         return ECORE_INVAL;
3426                 }
3427
3428                 /* Set the mcast filter in the internal memory */
3429                 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3430                         REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3431         } else
3432                 /* clear the registry */
3433                 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3434                        sizeof(o->registry.aprox_match.vec));
3435
3436         /* We are done */
3437         r->clear_pending(r);
3438
3439         return ECORE_SUCCESS;
3440 }
3441
3442 static int ecore_mcast_validate_e1(struct bxe_softc *sc,
3443                                    struct ecore_mcast_ramrod_params *p,
3444                                    enum ecore_mcast_cmd cmd)
3445 {
3446         struct ecore_mcast_obj *o = p->mcast_obj;
3447         int reg_sz = o->get_registry_size(o);
3448
3449         switch (cmd) {
3450         /* DEL command deletes all currently configured MACs */
3451         case ECORE_MCAST_CMD_DEL:
3452                 o->set_registry_size(o, 0);
3453                 /* Don't break: intentional fall-through */
3454
3455         /* RESTORE command will restore the entire multicast configuration */
3456         case ECORE_MCAST_CMD_RESTORE:
3457                 p->mcast_list_len = reg_sz;
3458                 ECORE_MSG(sc, "Command %d, p->mcast_list_len=%d\n",
3459                           cmd, p->mcast_list_len);
3460                 break;
3461
3462         case ECORE_MCAST_CMD_ADD:
3463         case ECORE_MCAST_CMD_CONT:
3464                 /* Multicast MACs on 57710 are configured as unicast MACs,
3465                  * and there is only a limited number of CAM entries
3466                  * available for that purpose.
3467                  */
3468                 if (p->mcast_list_len > o->max_cmd_len) {
3469                         ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n",
3470                                   o->max_cmd_len);
3471                         return ECORE_INVAL;
3472                 }
3473                 /* Every configured MAC should be cleared if the DEL command
3474                  * is called. Only the last ADD command is relevant, since
3475                  * every ADD command overrides the previous configuration.
3476                  */
3477                 ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3478                 if (p->mcast_list_len > 0)
3479                         o->set_registry_size(o, p->mcast_list_len);
3480
3481                 break;
3482
3483         default:
3484                 ECORE_ERR("Unknown command: %d\n", cmd);
3485                 return ECORE_INVAL;
3486         }
3487
3488         /* We want to ensure that commands are executed one by one for 57710.
3489          * Therefore each non-empty command will consume o->max_cmd_len.
3490          */
3491         if (p->mcast_list_len)
3492                 o->total_pending_num += o->max_cmd_len;
3493
3494         return ECORE_SUCCESS;
3495 }
3496
3497 static void ecore_mcast_revert_e1(struct bxe_softc *sc,
3498                                       struct ecore_mcast_ramrod_params *p,
3499                                       int old_num_macs)
3500 {
3501         struct ecore_mcast_obj *o = p->mcast_obj;
3502
3503         o->set_registry_size(o, old_num_macs);
3504
3505         /* If the current command hasn't been handled yet and we are
3506          * here, it means the command is meant to be dropped and we
3507          * have to update the number of outstanding MACs accordingly.
3508          */
3509         if (p->mcast_list_len)
3510                 o->total_pending_num -= o->max_cmd_len;
3511 }
3512
3513 static void ecore_mcast_set_one_rule_e1(struct bxe_softc *sc,
3514                                         struct ecore_mcast_obj *o, int idx,
3515                                         union ecore_mcast_config_data *cfg_data,
3516                                         enum ecore_mcast_cmd cmd)
3517 {
3518         struct ecore_raw_obj *r = &o->raw;
3519         struct mac_configuration_cmd *data =
3520                 (struct mac_configuration_cmd *)(r->rdata);
3521
3522         /* copy mac */
3523         if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) {
3524                 ecore_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3525                                       &data->config_table[idx].middle_mac_addr,
3526                                       &data->config_table[idx].lsb_mac_addr,
3527                                       cfg_data->mac);
3528
3529                 data->config_table[idx].vlan_id = 0;
3530                 data->config_table[idx].pf_id = r->func_id;
3531                 data->config_table[idx].clients_bit_vector =
3532                         ECORE_CPU_TO_LE32(1 << r->cl_id);
3533
3534                 ECORE_SET_FLAG(data->config_table[idx].flags,
3535                                MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3536                                T_ETH_MAC_COMMAND_SET);
3537         }
3538 }
3539
3540 /**
3541  * ecore_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3542  *
3543  * @sc:         device handle
3544  * @p:          multicast ramrod parameters
3545  * @len:        number of rules to handle
3546  */
3547 static inline void ecore_mcast_set_rdata_hdr_e1(struct bxe_softc *sc,
3548                                         struct ecore_mcast_ramrod_params *p,
3549                                         uint8_t len)
3550 {
3551         struct ecore_raw_obj *r = &p->mcast_obj->raw;
3552         struct mac_configuration_cmd *data =
3553                 (struct mac_configuration_cmd *)(r->rdata);
3554
3555         uint8_t offset = (CHIP_REV_IS_SLOW(sc) ?
3556                      ECORE_MAX_EMUL_MULTI*(1 + r->func_id) :
3557                      ECORE_MAX_MULTICAST*(1 + r->func_id));
3558
3559         data->hdr.offset = offset;
3560         data->hdr.client_id = ECORE_CPU_TO_LE16(0xff);
3561         data->hdr.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3562                                      (ECORE_FILTER_MCAST_PENDING <<
3563                                       ECORE_SWCID_SHIFT));
3564         data->hdr.length = len;
3565 }
3566
3567 /**
3568  * ecore_mcast_handle_restore_cmd_e1 - restore command for 57710
3569  *
3570  * @sc:         device handle
3571  * @o:          multicast object
3572  * @start_idx:  index in the registry to start from
3573  * @rdata_idx:  index in the ramrod data to start from
3574  *
3575  * The restore command for 57710 is, like all other commands, always a
3576  * stand-alone command, so start_idx and rdata_idx will always be 0. This
3577  * function always succeeds.
3578  * returns -1 to comply with the 57712 variant.
3579  */
3580 static inline int ecore_mcast_handle_restore_cmd_e1(
3581         struct bxe_softc *sc, struct ecore_mcast_obj *o, int start_idx,
3582         int *rdata_idx)
3583 {
3584         struct ecore_mcast_mac_elem *elem;
3585         int i = 0;
3586         union ecore_mcast_config_data cfg_data = {NULL};
3587
3588         /* go through the registry and configure the MACs from it. */
3589         ECORE_LIST_FOR_EACH_ENTRY(elem, &o->registry.exact_match.macs, link,
3590                                   struct ecore_mcast_mac_elem) {
3591                 cfg_data.mac = &elem->mac[0];
3592                 o->set_one_rule(sc, o, i, &cfg_data, ECORE_MCAST_CMD_RESTORE);
3593
3594                 i++;
3595
3596                 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3597                           cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]);
3598         }
3599
3600         *rdata_idx = i;
3601
3602         return -1;
3603 }
3604
3605 static inline int ecore_mcast_handle_pending_cmds_e1(
3606         struct bxe_softc *sc, struct ecore_mcast_ramrod_params *p)
3607 {
3608         struct ecore_pending_mcast_cmd *cmd_pos;
3609         struct ecore_mcast_mac_elem *pmac_pos;
3610         struct ecore_mcast_obj *o = p->mcast_obj;
3611         union ecore_mcast_config_data cfg_data = {NULL};
3612         int cnt = 0;
3613
3614         /* If nothing to be done - return */
3615         if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3616                 return 0;
3617
3618         /* Handle the first command only: 57710 executes one command per ramrod */
3619         cmd_pos = ECORE_LIST_FIRST_ENTRY(&o->pending_cmds_head,
3620                                          struct ecore_pending_mcast_cmd, link);
3621
3622         switch (cmd_pos->type) {
3623         case ECORE_MCAST_CMD_ADD:
3624                 ECORE_LIST_FOR_EACH_ENTRY(pmac_pos, &cmd_pos->data.macs_head,
3625                                           link, struct ecore_mcast_mac_elem) {
3626                         cfg_data.mac = &pmac_pos->mac[0];
3627                         o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
3628
3629                         cnt++;
3630
3631                         ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3632                                   pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
3633                 }
3634                 break;
3635
3636         case ECORE_MCAST_CMD_DEL:
3637                 cnt = cmd_pos->data.macs_num;
3638                 ECORE_MSG(sc, "About to delete %d multicast MACs\n", cnt);
3639                 break;
3640
3641         case ECORE_MCAST_CMD_RESTORE:
3642                 o->hdl_restore(sc, o, 0, &cnt);
3643                 break;
3644
3645         default:
3646                 ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3647                 return ECORE_INVAL;
3648         }
3649
3650         ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, &o->pending_cmds_head);
3651         ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3652
3653         return cnt;
3654 }
3655
3656 /**
3657  * ecore_get_fw_mac_addr - the inverse of ecore_set_fw_mac_addr().
3658  *
3659  * @fw_hi:      high 16 bits of the MAC in firmware format
3660  * @fw_mid:     middle 16 bits of the MAC in firmware format
3661  * @fw_lo:      low 16 bits of the MAC in firmware format
3662  * @mac:        output buffer for the 6-byte MAC address
3663  */
3664 static inline void ecore_get_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
3665                                          uint16_t *fw_lo, uint8_t *mac)
3666 {
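             /* Each 16-bit firmware word holds two MAC bytes in swapped
              * order; copy them back into the byte array with every pair
              * exchanged.
              */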
3667         mac[1] = ((uint8_t *)fw_hi)[0];
3668         mac[0] = ((uint8_t *)fw_hi)[1];
3669         mac[3] = ((uint8_t *)fw_mid)[0];
3670         mac[2] = ((uint8_t *)fw_mid)[1];
3671         mac[5] = ((uint8_t *)fw_lo)[0];
3672         mac[4] = ((uint8_t *)fw_lo)[1];
3673 }
3674
3675 /**
3676  * ecore_mcast_refresh_registry_e1 - update the exact match registry
3677  *
3678  * @sc:         device handle
3679  * @o:          multicast object
3680  *
3681  * Check the flag of the first entry in the ramrod data to see whether it was
3682  * a DELETE or an ADD command and update the registry accordingly: if ADD -
3683  * allocate memory and add the entries to the registry (list), if DELETE -
3684  * clear the registry and free the memory.
3685  */
3686 static inline int ecore_mcast_refresh_registry_e1(struct bxe_softc *sc,
3687                                                   struct ecore_mcast_obj *o)
3688 {
3689         struct ecore_raw_obj *raw = &o->raw;
3690         struct ecore_mcast_mac_elem *elem;
3691         struct mac_configuration_cmd *data =
3692                         (struct mac_configuration_cmd *)(raw->rdata);
3693
3694         /* If first entry contains a SET bit - the command was ADD,
3695          * otherwise - DEL_ALL
3696          */
3697         if (ECORE_GET_FLAG(data->config_table[0].flags,
3698                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3699                 int i, len = data->hdr.length;
3700
3701                 /* Return if it was a RESTORE command */
3702                 if (!ECORE_LIST_IS_EMPTY(&o->registry.exact_match.macs))
3703                         return ECORE_SUCCESS;
3704
3705                 elem = ECORE_CALLOC(len, sizeof(*elem), GFP_ATOMIC, sc);
3706                 if (!elem) {
3707                         ECORE_ERR("Failed to allocate registry memory\n");
3708                         return ECORE_NOMEM;
3709                 }
3710
3711                 for (i = 0; i < len; i++, elem++) {
3712                         ecore_get_fw_mac_addr(
3713                                 &data->config_table[i].msb_mac_addr,
3714                                 &data->config_table[i].middle_mac_addr,
3715                                 &data->config_table[i].lsb_mac_addr,
3716                                 elem->mac);
3717                         ECORE_MSG(sc, "Adding registry entry for [%02x:%02x:%02x:%02x:%02x:%02x]\n",
3718                                   elem->mac[0], elem->mac[1], elem->mac[2], elem->mac[3], elem->mac[4], elem->mac[5]);
3719                         ECORE_LIST_PUSH_TAIL(&elem->link,
3720                                              &o->registry.exact_match.macs);
3721                 }
3722         } else {
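                     /* The registry entries were allocated as a single
                      * contiguous ECORE_CALLOC() block in the ADD path above,
                      * so freeing the first list element releases the whole
                      * block.
                      */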
3723                 elem = ECORE_LIST_FIRST_ENTRY(&o->registry.exact_match.macs,
3724                                               struct ecore_mcast_mac_elem,
3725                                               link);
3726                 ECORE_MSG(sc, "Deleting a registry\n");
3727                 ECORE_FREE(sc, elem, sizeof(*elem));
3728                 ECORE_LIST_INIT(&o->registry.exact_match.macs);
3729         }
3730
3731         return ECORE_SUCCESS;
3732 }
3733
3734 static int ecore_mcast_setup_e1(struct bxe_softc *sc,
3735                                 struct ecore_mcast_ramrod_params *p,
3736                                 enum ecore_mcast_cmd cmd)
3737 {
3738         struct ecore_mcast_obj *o = p->mcast_obj;
3739         struct ecore_raw_obj *raw = &o->raw;
3740         struct mac_configuration_cmd *data =
3741                 (struct mac_configuration_cmd *)(raw->rdata);
3742         int cnt = 0, i, rc;
3743
3744         /* Reset the ramrod data buffer */
3745         ECORE_MEMSET(data, 0, sizeof(*data));
3746
3747         /* First set all entries as invalid */
3748         for (i = 0; i < o->max_cmd_len; i++)
3749                 ECORE_SET_FLAG(data->config_table[i].flags,
3750                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3751                         T_ETH_MAC_COMMAND_INVALIDATE);
3752
3753         /* Handle pending commands first */
3754         cnt = ecore_mcast_handle_pending_cmds_e1(sc, p);
3755
3756         /* If there are no more pending commands - clear SCHEDULED state */
3757         if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3758                 o->clear_sched(o);
3759
3760         /* The condition below holds iff there were no pending commands */
3761         if (!cnt)
3762                 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, 0);
3763
3764         /* For 57710 every command has o->max_cmd_len length to ensure that
3765          * commands are done one at a time.
3766          */
3767         o->total_pending_num -= o->max_cmd_len;
3768
3769         /* send a ramrod */
3770
3771         ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
3772
3773         /* Set ramrod header (in particular, a number of entries to update) */
3774         ecore_mcast_set_rdata_hdr_e1(sc, p, (uint8_t)cnt);
3775
3776         /* Update the registry: its contents must always be up to date in
3777          * order to be able to execute a RESTORE opcode. Here we use the
3778          * fact that for 57710 we send one command at a time, hence we may
3779          * take the registry update out of the command handling and do it
3780          * in a simpler way here.
3781          */
3782         rc = ecore_mcast_refresh_registry_e1(sc, o);
3783         if (rc)
3784                 return rc;
3785
3786         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3787          * RAMROD_PENDING status immediately.
3788          */
3789         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3790                 raw->clear_pending(raw);
3791                 return ECORE_SUCCESS;
3792         } else {
3793                 /* No need for an explicit memory barrier here: what must
3794                  * be ordered is the write to the SPQ element against the
3795                  * update of the SPQ producer, which involves a memory read,
3796                  * and a full memory barrier is already placed there
3797                  * (inside ecore_sp_post()).
3798                  */
3799
3800                 /* Send a ramrod */
3801                 rc = ecore_sp_post(sc,
3802                                    RAMROD_CMD_ID_ETH_SET_MAC,
3803                                    raw->cid,
3804                                    raw->rdata_mapping,
3805                                    ETH_CONNECTION_TYPE);
3806                 if (rc)
3807                         return rc;
3808
3809                 /* Ramrod completion is pending */
3810                 return ECORE_PENDING;
3811         }
3812 }
3813
3814 static int ecore_mcast_get_registry_size_exact(struct ecore_mcast_obj *o)
3815 {
3816         return o->registry.exact_match.num_macs_set;
3817 }
3818
3819 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3820 {
3821         return o->registry.aprox_match.num_bins_set;
3822 }
3823
3824 static void ecore_mcast_set_registry_size_exact(struct ecore_mcast_obj *o,
3825                                                 int n)
3826 {
3827         o->registry.exact_match.num_macs_set = n;
3828 }
3829
3830 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3831                                                 int n)
3832 {
3833         o->registry.aprox_match.num_bins_set = n;
3834 }
3835
3836 int ecore_config_mcast(struct bxe_softc *sc,
3837                        struct ecore_mcast_ramrod_params *p,
3838                        enum ecore_mcast_cmd cmd)
3839 {
3840         struct ecore_mcast_obj *o = p->mcast_obj;
3841         struct ecore_raw_obj *r = &o->raw;
3842         int rc = 0, old_reg_size;
3843
3844         /* This is needed to recover the number of currently configured
3845          * mcast MACs in case of failure.
3846          */
3847         old_reg_size = o->get_registry_size(o);
3848
3849         /* Do some calculations and checks */
3850         rc = o->validate(sc, p, cmd);
3851         if (rc)
3852                 return rc;
3853
3854         /* Return if there is no work to do */
3855         if ((!p->mcast_list_len) && (!o->check_sched(o)))
3856                 return ECORE_SUCCESS;
3857
3858         ECORE_MSG(sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3859                   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3860
3861         /* Enqueue the current command to the pending list if we can't complete
3862          * it in the current iteration
3863          */
3864         if (r->check_pending(r) ||
3865             ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3866                 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3867                 if (rc < 0)
3868                         goto error_exit1;
3869
3870                 /* Once the current command is on the pending command list
3871                  * we don't need to handle it separately.
3872                  */
3873                 p->mcast_list_len = 0;
3874         }
3875
3876         if (!r->check_pending(r)) {
3877
3878                 /* Set 'pending' state */
3879                 r->set_pending(r);
3880
3881                 /* Configure the new classification in the chip */
3882                 rc = o->config_mcast(sc, p, cmd);
3883                 if (rc < 0)
3884                         goto error_exit2;
3885
3886                 /* Wait for a ramrod completion if it was requested */
3887                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3888                         rc = o->wait_comp(sc, o);
3889         }
3890
3891         return rc;
3892
3893 error_exit2:
3894         r->clear_pending(r);
3895
3896 error_exit1:
3897         o->revert(sc, p, old_reg_size);
3898
3899         return rc;
3900 }
3901
3902 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3903 {
3904         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3905         ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3906         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3907 }
3908
3909 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3910 {
3911         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3912         ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3913         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3914 }
3915
3916 static bool ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3917 {
3918         return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3919 }
3920
3921 static bool ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3922 {
3923         return o->raw.check_pending(&o->raw) || o->check_sched(o);
3924 }
3925
3926 void ecore_init_mcast_obj(struct bxe_softc *sc,
3927                           struct ecore_mcast_obj *mcast_obj,
3928                           uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
3929                           uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
3930                           int state, unsigned long *pstate, ecore_obj_type type)
3931 {
3932         ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3933
3934         ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3935                            rdata, rdata_mapping, state, pstate, type);
3936
3937         mcast_obj->engine_id = engine_id;
3938
3939         ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3940
3941         mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3942         mcast_obj->check_sched = ecore_mcast_check_sched;
3943         mcast_obj->set_sched = ecore_mcast_set_sched;
3944         mcast_obj->clear_sched = ecore_mcast_clear_sched;
3945
3946         if (CHIP_IS_E1(sc)) {
3947                 mcast_obj->config_mcast      = ecore_mcast_setup_e1;
3948                 mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
3949                 mcast_obj->hdl_restore       =
3950                         ecore_mcast_handle_restore_cmd_e1;
3951                 mcast_obj->check_pending     = ecore_mcast_check_pending;
3952
3953                 if (CHIP_REV_IS_SLOW(sc))
3954                         mcast_obj->max_cmd_len = ECORE_MAX_EMUL_MULTI;
3955                 else
3956                         mcast_obj->max_cmd_len = ECORE_MAX_MULTICAST;
3957
3958                 mcast_obj->wait_comp         = ecore_mcast_wait;
3959                 mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e1;
3960                 mcast_obj->validate          = ecore_mcast_validate_e1;
3961                 mcast_obj->revert            = ecore_mcast_revert_e1;
3962                 mcast_obj->get_registry_size =
3963                         ecore_mcast_get_registry_size_exact;
3964                 mcast_obj->set_registry_size =
3965                         ecore_mcast_set_registry_size_exact;
3966
3967                 /* 57710 is the only chip that uses the exact match for mcast
3968                  * at the moment.
3969                  */
3970                 ECORE_LIST_INIT(&mcast_obj->registry.exact_match.macs);
3971
3972         } else if (CHIP_IS_E1H(sc)) {
3973                 mcast_obj->config_mcast  = ecore_mcast_setup_e1h;
3974                 mcast_obj->enqueue_cmd   = NULL;
3975                 mcast_obj->hdl_restore   = NULL;
3976                 mcast_obj->check_pending = ecore_mcast_check_pending;
3977
3978                 /* 57711 doesn't send a ramrod, so it has unlimited credit
3979                  * for one command.
3980                  */
3981                 mcast_obj->max_cmd_len       = -1;
3982                 mcast_obj->wait_comp         = ecore_mcast_wait;
3983                 mcast_obj->set_one_rule      = NULL;
3984                 mcast_obj->validate          = ecore_mcast_validate_e1h;
3985                 mcast_obj->revert            = ecore_mcast_revert_e1h;
3986                 mcast_obj->get_registry_size =
3987                         ecore_mcast_get_registry_size_aprox;
3988                 mcast_obj->set_registry_size =
3989                         ecore_mcast_set_registry_size_aprox;
3990         } else {
3991                 mcast_obj->config_mcast      = ecore_mcast_setup_e2;
3992                 mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
3993                 mcast_obj->hdl_restore       =
3994                         ecore_mcast_handle_restore_cmd_e2;
3995                 mcast_obj->check_pending     = ecore_mcast_check_pending;
3996                 /* TODO: There should be a proper HSI define for this number!!!
3997                  */
3998                 mcast_obj->max_cmd_len       = 16;
3999                 mcast_obj->wait_comp         = ecore_mcast_wait;
4000                 mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e2;
4001                 mcast_obj->validate          = ecore_mcast_validate_e2;
4002                 mcast_obj->revert            = ecore_mcast_revert_e2;
4003                 mcast_obj->get_registry_size =
4004                         ecore_mcast_get_registry_size_aprox;
4005                 mcast_obj->set_registry_size =
4006                         ecore_mcast_set_registry_size_aprox;
4007         }
4008 }
4009
4010 /*************************** Credit handling **********************************/
4011
4012 /**
4013  * __atomic_add_ifless - add if the result is less than a given value.
4014  *
4015  * @v:  pointer of type ecore_atomic_t
4016  * @a:  the amount to add to v...
4017  * @u:  ...if (v + a) is less than u.
4018  *
4019  * returns TRUE if (v + a) was less than u, and FALSE otherwise.
4020  *
4021  */
4022 static inline bool __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
4023 {
4024         int c, old;
4025
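             /* A classic compare-and-swap loop: if ECORE_ATOMIC_CMPXCHG()
              * fails because another thread changed the value, re-read it
              * and retry until either the swap succeeds or the bound would
              * be violated.
              */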
4026         c = ECORE_ATOMIC_READ(v);
4027         for (;;) {
4028                 if (ECORE_UNLIKELY(c + a >= u))
4029                         return FALSE;
4030
4031                 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
4032                 if (ECORE_LIKELY(old == c))
4033                         break;
4034                 c = old;
4035         }
4036
4037         return TRUE;
4038 }
4039
4040 /**
4041  * __atomic_dec_ifmoe - subtract if the result is greater than or equal to a given value.
4042  *
4043  * @v:  pointer of type ecore_atomic_t
4044  * @a:  the amount to subtract from v...
4045  * @u:  ...if (v - a) is greater than or equal to u.
4046  *
4047  * returns TRUE if (v - a) was greater than or equal to u, and FALSE
4048  * otherwise.
4049  */
4050 static inline bool __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
4051 {
4052         int c, old;
4053
4054         c = ECORE_ATOMIC_READ(v);
4055         for (;;) {
4056                 if (ECORE_UNLIKELY(c - a < u))
4057                         return FALSE;
4058
4059                 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
4060                 if (ECORE_LIKELY(old == c))
4061                         break;
4062                 c = old;
4063         }
4064
4065         return TRUE;
4066 }
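
/* Illustrative sketch (not compiled into the driver): the mirror image of
 * __atomic_add_ifless above, with u == 0 as the credit pool uses it:
 *
 *   ecore_atomic_t v;
 *   ECORE_ATOMIC_SET(&v, 3);
 *   __atomic_dec_ifmoe(&v, 2, 0);     // 3 - 2 >= 0 -> TRUE, v becomes 1
 *   __atomic_dec_ifmoe(&v, 2, 0);     // 1 - 2 < 0  -> FALSE, v stays 1
 */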
4067
4068 static bool ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
4069 {
4070         bool rc;
4071
4072         ECORE_SMP_MB();
4073         rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4074         ECORE_SMP_MB();
4075
4076         return rc;
4077 }
4078
4079 static bool ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
4080 {
4081         bool rc;
4082
4083         ECORE_SMP_MB();
4084
4085         /* Don't allow a refill if credit + cnt > pool_sz */
4086         rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4087
4088         ECORE_SMP_MB();
4089
4090         return rc;
4091 }
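
/* Usage sketch (illustrative only; issue_ramrod is a hypothetical stand-in
 * for the caller's send step): credit is reserved before issuing a ramrod
 * and rolled back if the send fails:
 *
 *   if (!o->get(o, 1))
 *           return ECORE_NOMEM;      // no CAM credit left
 *   rc = issue_ramrod(...);
 *   if (rc)
 *           o->put(o, 1);            // roll the credit back on error
 */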
4092
4093 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
4094 {
4095         int cur_credit;
4096
4097         ECORE_SMP_MB();
4098         cur_credit = ECORE_ATOMIC_READ(&o->credit);
4099
4100         return cur_credit;
4101 }
4102
4103 static bool ecore_credit_pool_always_TRUE(struct ecore_credit_pool_obj *o,
4104                                           int cnt)
4105 {
4106         return TRUE;
4107 }
4108
4109 static bool ecore_credit_pool_get_entry(
4110         struct ecore_credit_pool_obj *o,
4111         int *offset)
4112 {
4113         int idx, vec, i;
4114
4115         *offset = -1;
4116
4117         /* Find "internal cam-offset" then add to base for this object... */
4118         for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
4119
4120                 /* Skip the current vector if there are no free entries in it */
4121                 if (!o->pool_mirror[vec])
4122                         continue;
4123
4124                 /* If we've got here we are going to find a free entry */
4125                 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
4126                       i < BIT_VEC64_ELEM_SZ; idx++, i++)
4127
4128                         if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4129                                 /* Got one!! */
4130                                 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4131                                 *offset = o->base_pool_offset + idx;
4132                                 return TRUE;
4133                         }
4134         }
4135
4136         return FALSE;
4137 }
4138
4139 static bool ecore_credit_pool_put_entry(
4140         struct ecore_credit_pool_obj *o,
4141         int offset)
4142 {
4143         if (offset < o->base_pool_offset)
4144                 return FALSE;
4145
4146         offset -= o->base_pool_offset;
4147
4148         if (offset >= o->pool_sz)
4149                 return FALSE;
4150
4151         /* Return the entry to the pool */
4152         BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4153
4154         return TRUE;
4155 }
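
/* Illustrative pairing (not compiled in): get_entry hands out an absolute
 * CAM offset that the caller later returns with put_entry:
 *
 *   int off;
 *   if (o->get_entry(o, &off)) {
 *           // ... program a MAC at CAM offset 'off' ...
 *           o->put_entry(o, off);    // release the entry when done
 *   }
 */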
4156
4157 static bool ecore_credit_pool_put_entry_always_TRUE(
4158         struct ecore_credit_pool_obj *o,
4159         int offset)
4160 {
4161         return TRUE;
4162 }
4163
4164 static bool ecore_credit_pool_get_entry_always_TRUE(
4165         struct ecore_credit_pool_obj *o,
4166         int *offset)
4167 {
4168         *offset = -1;
4169         return TRUE;
4170 }
4171 /**
4172  * ecore_init_credit_pool - initialize credit pool internals.
4173  *
4174  * @p:          credit pool object to initialize
4175  * @base:       Base entry in the CAM to use.
4176  * @credit:     pool size.
4177  *
4178  * If base is negative no CAM entries handling will be performed.
4179  * If credit is negative pool operations will always succeed (unlimited pool).
4180  *
4181  */
4182 static inline void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
4183                                           int base, int credit)
4184 {
4185         /* Zero the object first */
4186         ECORE_MEMSET(p, 0, sizeof(*p));
4187
4188         /* Set the table to all 1s */
4189         ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4190
4191         /* Init a pool as full */
4192         ECORE_ATOMIC_SET(&p->credit, credit);
4193
4194         /* The total pool size */
4195         p->pool_sz = credit;
4196
4197         p->base_pool_offset = base;
4198
4199         /* Commit the change */
4200         ECORE_SMP_MB();
4201
4202         p->check = ecore_credit_pool_check;
4203
4204         /* if pool credit is negative - disable the checks */
4205         if (credit >= 0) {
4206                 p->put      = ecore_credit_pool_put;
4207                 p->get      = ecore_credit_pool_get;
4208                 p->put_entry = ecore_credit_pool_put_entry;
4209                 p->get_entry = ecore_credit_pool_get_entry;
4210         } else {
4211                 p->put      = ecore_credit_pool_always_TRUE;
4212                 p->get      = ecore_credit_pool_always_TRUE;
4213                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4214                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4215         }
4216
4217         /* If base is negative - disable entries handling */
4218         if (base < 0) {
4219                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4220                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4221         }
4222 }
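
/* Illustrative calls (not compiled in), matching the rules above:
 *
 *   ecore_init_credit_pool(p, base, 32); // 32 credits, CAM entries at base
 *   ecore_init_credit_pool(p, -1, 32);   // 32 credits, no CAM entry handling
 *   ecore_init_credit_pool(p, 0, -1);    // unlimited pool, get/put always TRUE
 *   ecore_init_credit_pool(p, 0, 0);     // empty pool, blocks all operations
 */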
4223
4224 void ecore_init_mac_credit_pool(struct bxe_softc *sc,
4225                                 struct ecore_credit_pool_obj *p, uint8_t func_id,
4226                                 uint8_t func_num)
4227 {
4228 /* TODO: this will be defined in consts as well... */
4229 #define ECORE_CAM_SIZE_EMUL 5
4230
4231         int cam_sz;
4232
4233         if (CHIP_IS_E1(sc)) {
4234                 /* In E1, Multicast is saved in the CAM... */
4235                 if (!CHIP_REV_IS_SLOW(sc))
4236                         cam_sz = (MAX_MAC_CREDIT_E1 / 2) - ECORE_MAX_MULTICAST;
4237                 else
4238                         cam_sz = ECORE_CAM_SIZE_EMUL - ECORE_MAX_EMUL_MULTI;
4239
4240                 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4241
4242         } else if (CHIP_IS_E1H(sc)) {
4243                 /* CAM credit is equally divided among all active functions
4244                  * on the PORT.
4245                  */
4246                 if (func_num > 0) {
4247                         if (!CHIP_REV_IS_SLOW(sc))
4248                                 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4249                         else
4250                                 cam_sz = ECORE_CAM_SIZE_EMUL;
4251                         ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4252                 } else {
4253                         /* this should never happen! Block MAC operations. */
4254                         ecore_init_credit_pool(p, 0, 0);
4255                 }
4256
4257         } else {
4258
4259                 /*
4260                  * CAM credit is equally divided among all active functions
4261                  * on the PATH.
4262                  */
4263                 if (func_num > 1) {
4264                         if (!CHIP_REV_IS_SLOW(sc))
4265                                 cam_sz = (MAX_MAC_CREDIT_E2 -
4266                                           GET_NUM_VFS_PER_PATH(sc)) /
4267                                          func_num +
4268                                          GET_NUM_VFS_PER_PF(sc);
4269                         else
4270                                 cam_sz = ECORE_CAM_SIZE_EMUL;
4271
4272                         /* No need for CAM entries handling for 57712 and
4273                          * newer.
4274                          */
4275                         ecore_init_credit_pool(p, -1, cam_sz);
4276                 } else if (func_num == 1) {
4277                         if (!CHIP_REV_IS_SLOW(sc))
4278                                 cam_sz = MAX_MAC_CREDIT_E2;
4279                         else
4280                                 cam_sz = ECORE_CAM_SIZE_EMUL;
4281
4282                         /* No need for CAM entries handling for 57712 and
4283                          * newer.
4284                          */
4285                         ecore_init_credit_pool(p, -1, cam_sz);
4286                 } else {
4287                         /* this should never happen! Block MAC operations. */
4288                         ecore_init_credit_pool(p, 0, 0);
4289                 }
4290         }
4291 }
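
/* Worked example (illustrative): on a fast 57711 (E1H) with four active
 * functions, each function receives MAX_MAC_CREDIT_E1H / (2 * 4) CAM
 * entries, and function N's entries start at CAM offset N * cam_sz; the
 * MAX_MAC_CREDIT_* values themselves are defined elsewhere in the driver.
 */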
4292
4293 void ecore_init_vlan_credit_pool(struct bxe_softc *sc,
4294                                  struct ecore_credit_pool_obj *p,
4295                                  uint8_t func_id,
4296                                  uint8_t func_num)
4297 {
4298         if (CHIP_IS_E1x(sc)) {
4299                 /* There is no VLAN credit in HW on 57710 and 57711; only
4300                  * MAC / MAC-VLAN pairs can be set.
4301                  */
4302                 ecore_init_credit_pool(p, 0, -1);
4303         } else {
4304                 /* CAM credit is equally divided among all active functions
4305                  * on the PATH.
4306                  */
4307                 if (func_num > 0) {
4308                         int credit = MAX_VLAN_CREDIT_E2 / func_num;
4309                         ecore_init_credit_pool(p, func_id * credit, credit);
4310                 } else
4311                         /* this should never happen! Block VLAN operations. */
4312                         ecore_init_credit_pool(p, 0, 0);
4313         }
4314 }
4315
4316 /****************** RSS Configuration ******************/
4317
4318 /**
4319  * ecore_setup_rss - configure RSS
4320  *
4321  * @sc:         device handle
4322  * @p:          rss configuration
4323  *
4324  * Sends an RSS_UPDATE ramrod for that purpose.
4325  */
4326 static int ecore_setup_rss(struct bxe_softc *sc,
4327                            struct ecore_config_rss_params *p)
4328 {
4329         struct ecore_rss_config_obj *o = p->rss_obj;
4330         struct ecore_raw_obj *r = &o->raw;
4331         struct eth_rss_update_ramrod_data *data =
4332                 (struct eth_rss_update_ramrod_data *)(r->rdata);
4333         uint8_t rss_mode = 0;
4334         int rc;
4335
4336         ECORE_MEMSET(data, 0, sizeof(*data));
4337
4338         ECORE_MSG(sc, "Configuring RSS\n");
4339
4340         /* Set an echo field */
4341         data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
4342                                  (r->state << ECORE_SWCID_SHIFT));
4343
4344         /* RSS mode */
4345         if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
4346                 rss_mode = ETH_RSS_MODE_DISABLED;
4347         else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
4348                 rss_mode = ETH_RSS_MODE_REGULAR;
4349 #if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! BNX2X_UPSTREAM */
4350         else if (ECORE_TEST_BIT(ECORE_RSS_MODE_ESX51, &p->rss_flags))
4351                 rss_mode = ETH_RSS_MODE_ESX51;
4352 #endif
4353
4354         data->rss_mode = rss_mode;
4355
4356         ECORE_MSG(sc, "rss_mode=%d\n", rss_mode);
4357
4358         /* RSS capabilities */
4359         if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
4360                 data->capabilities |=
4361                         ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4362
4363         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
4364                 data->capabilities |=
4365                         ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4366
4367         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
4368                 data->capabilities |=
4369                         ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4370
4371         if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
4372                 data->capabilities |=
4373                         ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4374
4375         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
4376                 data->capabilities |=
4377                         ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4378
4379         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
4380                 data->capabilities |=
4381                         ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4382
4383         if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
4384                 data->udp_4tuple_dst_port_mask = ECORE_CPU_TO_LE16(p->tunnel_mask);
4385                 data->udp_4tuple_dst_port_value =
4386                         ECORE_CPU_TO_LE16(p->tunnel_value);
4387         }
4388
4389         /* Hashing mask */
4390         data->rss_result_mask = p->rss_result_mask;
4391
4392         /* RSS engine ID */
4393         data->rss_engine_id = o->engine_id;
4394
4395         ECORE_MSG(sc, "rss_engine_id=%d\n", data->rss_engine_id);
4396
4397         /* Indirection table */
4398         ECORE_MEMCPY(data->indirection_table, p->ind_table,
4399                   T_ETH_INDIRECTION_TABLE_SIZE);
4400
4401         /* Remember the last configuration */
4402         ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4403
4404
4405         /* RSS keys */
4406         if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
4407                 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
4408                        sizeof(data->rss_key));
4409                 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4410         }
4411
4412         /* No need for an explicit memory barrier here: ordering the
4413          * write of the SPQ element against the update of the SPQ
4414          * producer involves a memory read, and the full memory
4415          * barrier needed for that is already placed inside
4416          * ecore_sp_post().
4417          */
4418
4419         /* Send a ramrod */
4420         rc = ecore_sp_post(sc,
4421                              RAMROD_CMD_ID_ETH_RSS_UPDATE,
4422                              r->cid,
4423                              r->rdata_mapping,
4424                              ETH_CONNECTION_TYPE);
4425
4426         if (rc < 0)
4427                 return rc;
4428
4429         return ECORE_PENDING;
4430 }
4431
4432 void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
4433                              uint8_t *ind_table)
4434 {
4435         ECORE_MEMCPY(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4436 }
4437
4438 int ecore_config_rss(struct bxe_softc *sc,
4439                      struct ecore_config_rss_params *p)
4440 {
4441         int rc;
4442         struct ecore_rss_config_obj *o = p->rss_obj;
4443         struct ecore_raw_obj *r = &o->raw;
4444
4445         /* Do nothing if only driver cleanup was requested */
4446         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4447                 return ECORE_SUCCESS;
4448
4449         r->set_pending(r);
4450
4451         rc = o->config_rss(sc, p);
4452         if (rc < 0) {
4453                 r->clear_pending(r);
4454                 return rc;
4455         }
4456
4457         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
4458                 rc = r->wait_comp(sc, r);
4459
4460         return rc;
4461 }
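
/* Caller-side sketch (illustrative, not part of this file): a typical
 * synchronous RSS configuration fills the parameters and waits for the
 * ramrod completion:
 *
 *   struct ecore_config_rss_params params = { 0 };
 *   params.rss_obj = rss_obj;
 *   ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
 *   ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *   // ... fill rss_result_mask, ind_table and rss_key as needed ...
 *   rc = ecore_config_rss(sc, &params);
 */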
4462
4463 void ecore_init_rss_config_obj(struct bxe_softc *sc,
4464                                struct ecore_rss_config_obj *rss_obj,
4465                                uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
4466                                void *rdata, ecore_dma_addr_t rdata_mapping,
4467                                int state, unsigned long *pstate,
4468                                ecore_obj_type type)
4469 {
4470         ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4471                            rdata_mapping, state, pstate, type);
4472
4473         rss_obj->engine_id  = engine_id;
4474         rss_obj->config_rss = ecore_setup_rss;
4475 }
4476
4477 int validate_vlan_mac(struct bxe_softc *sc,
4478                       struct ecore_vlan_mac_obj *vlan_mac)
4479 {
4480         if (!vlan_mac->get_n_elements) {
4481                 ECORE_ERR("vlan mac object was not initialized\n");
4482                 return ECORE_INVAL;
4483         }
4484         return 0;
4485 }
4486
4487 /********************** Queue state object ***********************************/
4488
4489 /**
4490  * ecore_queue_state_change - perform Queue state change transition
4491  *
4492  * @sc:         device handle
4493  * @params:     parameters to perform the transition
4494  *
4495  * returns 0 in case of a successfully completed transition, a negative error
4496  * code in case of failure, or a positive (EBUSY) value if there is a
4497  * completion that is still pending (possible only if RAMROD_COMP_WAIT is
4498  * not set in params->ramrod_flags for asynchronous commands).
4499  *
4500  */
4501 int ecore_queue_state_change(struct bxe_softc *sc,
4502                              struct ecore_queue_state_params *params)
4503 {
4504         struct ecore_queue_sp_obj *o = params->q_obj;
4505         int rc, pending_bit;
4506         unsigned long *pending = &o->pending;
4507
4508         /* Check that the requested transition is legal */
4509         rc = o->check_transition(sc, o, params);
4510         if (rc) {
4511                 ECORE_ERR("check transition returned an error. rc %d\n", rc);
4512                 return ECORE_INVAL;
4513         }
4514
4515         /* Set "pending" bit */
4516         ECORE_MSG(sc, "pending bit was=%lx\n", o->pending);
4517         pending_bit = o->set_pending(o, params);
4518         ECORE_MSG(sc, "pending bit now=%lx\n", o->pending);
4519
4520         /* Don't send a command if only driver cleanup was requested */
4521         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4522                 o->complete_cmd(sc, o, pending_bit);
4523         else {
4524                 /* Send a ramrod */
4525                 rc = o->send_cmd(sc, params);
4526                 if (rc) {
4527                         o->next_state = ECORE_Q_STATE_MAX;
4528                         ECORE_CLEAR_BIT(pending_bit, pending);
4529                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
4530                         return rc;
4531                 }
4532
4533                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4534                         rc = o->wait_comp(sc, o, pending_bit);
4535                         if (rc)
4536                                 return rc;
4537
4538                         return ECORE_SUCCESS;
4539                 }
4540         }
4541
4542         return ECORE_RET_PENDING(pending_bit, pending);
4543 }
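
/* Caller-side sketch (illustrative): halting a queue synchronously through
 * this state machine looks roughly like:
 *
 *   struct ecore_queue_state_params qparams = { 0 };
 *   qparams.q_obj = q_obj;
 *   qparams.cmd = ECORE_Q_CMD_HALT;
 *   ECORE_SET_BIT(RAMROD_COMP_WAIT, &qparams.ramrod_flags);
 *   rc = ecore_queue_state_change(sc, &qparams);
 */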
4544
4545 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
4546                                    struct ecore_queue_state_params *params)
4547 {
4548         enum ecore_queue_cmd cmd = params->cmd, bit;
4549
4550         /* ACTIVATE and DEACTIVATE commands are implemented on top of
4551          * the UPDATE command.
4552          */
4553         if ((cmd == ECORE_Q_CMD_ACTIVATE) ||
4554             (cmd == ECORE_Q_CMD_DEACTIVATE))
4555                 bit = ECORE_Q_CMD_UPDATE;
4556         else
4557                 bit = cmd;
4558
4559         ECORE_SET_BIT(bit, &obj->pending);
4560         return bit;
4561 }
4562
4563 static int ecore_queue_wait_comp(struct bxe_softc *sc,
4564                                  struct ecore_queue_sp_obj *o,
4565                                  enum ecore_queue_cmd cmd)
4566 {
4567         return ecore_state_wait(sc, cmd, &o->pending);
4568 }
4569
4570 /**
4571  * ecore_queue_comp_cmd - complete the state change command.
4572  *
4573  * @sc:         device handle
4574  * @o:
4575  * @cmd:
4576  *
4577  * Checks that the arrived completion is expected.
4578  */
4579 static int ecore_queue_comp_cmd(struct bxe_softc *sc,
4580                                 struct ecore_queue_sp_obj *o,
4581                                 enum ecore_queue_cmd cmd)
4582 {
4583         unsigned long cur_pending = o->pending;
4584
4585         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4586                 ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4587                           cmd, o->cids[ECORE_PRIMARY_CID_INDEX],
4588                           o->state, cur_pending, o->next_state);
4589                 return ECORE_INVAL;
4590         }
4591
4592         if (o->next_tx_only >= o->max_cos)
4593                 /* >= because the tx-only count must always stay strictly
4594                  * below max_cos, since the primary connection supports COS 0
4595                  */
4596                 ECORE_ERR("illegal value for next tx_only: %d. max cos was %d",
4597                           o->next_tx_only, o->max_cos);
4598
4599         ECORE_MSG(sc,
4600                   "Completing command %d for queue %d, setting state to %d\n",
4601                   cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
4602
4603         if (o->next_tx_only)  /* print num tx-only if any exist */
4604                 ECORE_MSG(sc, "primary cid %d: num tx-only cons %d\n",
4605                           o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
4606
4607         o->state = o->next_state;
4608         o->num_tx_only = o->next_tx_only;
4609         o->next_state = ECORE_Q_STATE_MAX;
4610
4611         /* It's important that o->state and o->next_state are
4612          * updated before o->pending.
4613          */
4614         wmb();
4615
4616         ECORE_CLEAR_BIT(cmd, &o->pending);
4617         ECORE_SMP_MB_AFTER_CLEAR_BIT();
4618
4619         return ECORE_SUCCESS;
4620 }
4621
4622 static void ecore_q_fill_setup_data_e2(struct bxe_softc *sc,
4623                                 struct ecore_queue_state_params *cmd_params,
4624                                 struct client_init_ramrod_data *data)
4625 {
4626         struct ecore_queue_setup_params *params = &cmd_params->params.setup;
4627
4628         /* Rx data */
4629
4630         /* IPv6 TPA supported for E2 and above only */
4631         data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
4632                                           &params->flags) *
4633                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4634 }
4635
4636 static void ecore_q_fill_init_general_data(struct bxe_softc *sc,
4637                                 struct ecore_queue_sp_obj *o,
4638                                 struct ecore_general_setup_params *params,
4639                                 struct client_init_general_data *gen_data,
4640                                 unsigned long *flags)
4641 {
4642         gen_data->client_id = o->cl_id;
4643
4644         if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
4645                 gen_data->statistics_counter_id =
4646                                         params->stat_id;
4647                 gen_data->statistics_en_flg = 1;
4648                 gen_data->statistics_zero_flg =
4649                         ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
4650         } else
4651                 gen_data->statistics_counter_id =
4652                                         DISABLE_STATISTIC_COUNTER_ID_VALUE;
4653
4654         gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE,
4655                                                    flags);
4656         gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4657                                                     flags);
4658         gen_data->sp_client_id = params->spcl_id;
4659         gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
4660         gen_data->func_id = o->func_id;
4661
4662         gen_data->cos = params->cos;
4663
4664         gen_data->traffic_type =
4665                 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
4666                 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4667
4668         ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d\n",
4669                   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4670 }
4671
4672 static void ecore_q_fill_init_tx_data(struct ecore_queue_sp_obj *o,
4673                                 struct ecore_txq_setup_params *params,
4674                                 struct client_init_tx_data *tx_data,
4675                                 unsigned long *flags)
4676 {
4677         tx_data->enforce_security_flg =
4678                 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
4679         tx_data->default_vlan =
4680                 ECORE_CPU_TO_LE16(params->default_vlan);
4681         tx_data->default_vlan_flg =
4682                 ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
4683         tx_data->tx_switching_flg =
4684                 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
4685         tx_data->anti_spoofing_flg =
4686                 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
4687         tx_data->force_default_pri_flg =
4688                 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
4689         tx_data->refuse_outband_vlan_flg =
4690                 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
4691         tx_data->tunnel_lso_inc_ip_id =
4692                 ECORE_TEST_BIT(ECORE_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4693         tx_data->tunnel_non_lso_pcsum_location =
4694                 ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
4695                                                             CSUM_ON_BD;
4696
4697         tx_data->tx_status_block_id = params->fw_sb_id;
4698         tx_data->tx_sb_index_number = params->sb_cq_index;
4699         tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4700
4701         tx_data->tx_bd_page_base.lo =
4702                 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
4703         tx_data->tx_bd_page_base.hi =
4704                 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
4705
4706         /* Don't configure any Tx switching mode during queue SETUP */
4707         tx_data->state = 0;
4708 }
4709
4710 static void ecore_q_fill_init_pause_data(struct ecore_queue_sp_obj *o,
4711                                 struct rxq_pause_params *params,
4712                                 struct client_init_rx_data *rx_data)
4713 {
4714         /* flow control data */
4715         rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
4716         rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
4717         rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
4718         rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
4719         rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
4720         rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
4721         rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
4722 }
4723
4724 static void ecore_q_fill_init_rx_data(struct ecore_queue_sp_obj *o,
4725                                 struct ecore_rxq_setup_params *params,
4726                                 struct client_init_rx_data *rx_data,
4727                                 unsigned long *flags)
4728 {
4729         rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
4730                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4731         rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
4732                                 CLIENT_INIT_RX_DATA_TPA_MODE;
4733         rx_data->vmqueue_mode_en_flg = 0;
4734
4735         rx_data->extra_data_over_sgl_en_flg =
4736                 ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
4737         rx_data->cache_line_alignment_log_size =
4738                 params->cache_line_log;
4739         rx_data->enable_dynamic_hc =
4740                 ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
4741         rx_data->max_sges_for_packet = params->max_sges_pkt;
4742         rx_data->client_qzone_id = params->cl_qzone_id;
4743         rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
4744
4745         /* Always start in DROP_ALL mode */
4746         rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4747                                      CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4748
4749         /* We don't set drop flags */
4750         rx_data->drop_ip_cs_err_flg = 0;
4751         rx_data->drop_tcp_cs_err_flg = 0;
4752         rx_data->drop_ttl0_flg = 0;
4753         rx_data->drop_udp_cs_err_flg = 0;
4754         rx_data->inner_vlan_removal_enable_flg =
4755                 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
4756         rx_data->outer_vlan_removal_enable_flg =
4757                 ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
4758         rx_data->status_block_id = params->fw_sb_id;
4759         rx_data->rx_sb_index_number = params->sb_cq_index;
4760         rx_data->max_tpa_queues = params->max_tpa_queues;
4761         rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
4762         rx_data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buf_sz);
4763         rx_data->bd_page_base.lo =
4764                 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
4765         rx_data->bd_page_base.hi =
4766                 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
4767         rx_data->sge_page_base.lo =
4768                 ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
4769         rx_data->sge_page_base.hi =
4770                 ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
4771         rx_data->cqe_page_base.lo =
4772                 ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
4773         rx_data->cqe_page_base.hi =
4774                 ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
4775         rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
4776                                                  flags);
4777
4778         if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
4779                 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4780                 rx_data->is_approx_mcast = 1;
4781         }
4782
4783         rx_data->rss_engine_id = params->rss_engine_id;
4784
4785         /* silent vlan removal */
4786         rx_data->silent_vlan_removal_flg =
4787                 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
4788         rx_data->silent_vlan_value =
4789                 ECORE_CPU_TO_LE16(params->silent_removal_value);
4790         rx_data->silent_vlan_mask =
4791                 ECORE_CPU_TO_LE16(params->silent_removal_mask);
4792 }
4793
4794 /* initialize the general, tx and rx parts of a queue object */
4795 static void ecore_q_fill_setup_data_cmn(struct bxe_softc *sc,
4796                                 struct ecore_queue_state_params *cmd_params,
4797                                 struct client_init_ramrod_data *data)
4798 {
4799         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4800                                        &cmd_params->params.setup.gen_params,
4801                                        &data->general,
4802                                        &cmd_params->params.setup.flags);
4803
4804         ecore_q_fill_init_tx_data(cmd_params->q_obj,
4805                                   &cmd_params->params.setup.txq_params,
4806                                   &data->tx,
4807                                   &cmd_params->params.setup.flags);
4808
4809         ecore_q_fill_init_rx_data(cmd_params->q_obj,
4810                                   &cmd_params->params.setup.rxq_params,
4811                                   &data->rx,
4812                                   &cmd_params->params.setup.flags);
4813
4814         ecore_q_fill_init_pause_data(cmd_params->q_obj,
4815                                      &cmd_params->params.setup.pause_params,
4816                                      &data->rx);
4817 }
4818
4819 /* initialize the general and tx parts of a tx-only queue object */
4820 static void ecore_q_fill_setup_tx_only(struct bxe_softc *sc,
4821                                 struct ecore_queue_state_params *cmd_params,
4822                                 struct tx_queue_init_ramrod_data *data)
4823 {
4824         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4825                                        &cmd_params->params.tx_only.gen_params,
4826                                        &data->general,
4827                                        &cmd_params->params.tx_only.flags);
4828
4829         ecore_q_fill_init_tx_data(cmd_params->q_obj,
4830                                   &cmd_params->params.tx_only.txq_params,
4831                                   &data->tx,
4832                                   &cmd_params->params.tx_only.flags);
4833
4834         ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
4835                   cmd_params->q_obj->cids[0],
4836                   data->tx.tx_bd_page_base.lo,
4837                   data->tx.tx_bd_page_base.hi);
4838 }
4839
4840 /**
4841  * ecore_q_init - init HW/FW queue
4842  *
4843  * @sc:         device handle
4844  * @params:
4845  *
4846  * HW/FW initial Queue configuration:
4847  *      - HC: Rx and Tx
4848  *      - CDU context validation
4849  *
4850  */
4851 static inline int ecore_q_init(struct bxe_softc *sc,
4852                                struct ecore_queue_state_params *params)
4853 {
4854         struct ecore_queue_sp_obj *o = params->q_obj;
4855         struct ecore_queue_init_params *init = &params->params.init;
4856         uint16_t hc_usec;
4857         uint8_t cos;
4858
4859         /* Tx HC configuration */
4860         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
4861             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
4862                 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4863
4864                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
4865                         init->tx.sb_cq_index,
4866                         !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->tx.flags),
4867                         hc_usec);
4868         }
4869
4870         /* Rx HC configuration */
4871         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
4872             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
4873                 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4874
4875                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
4876                         init->rx.sb_cq_index,
4877                         !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->rx.flags),
4878                         hc_usec);
4879         }
4880
4881         /* Set CDU context validation values */
4882         for (cos = 0; cos < o->max_cos; cos++) {
4883                 ECORE_MSG(sc, "setting context validation. cid %d, cos %d\n",
4884                           o->cids[cos], cos);
4885                 ECORE_MSG(sc, "context pointer %p\n", init->cxts[cos]);
4886                 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
4887         }
4888
4889         /* As no ramrod is sent, complete the command immediately */
4890         o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
4891
4892         ECORE_MMIOWB();
4893         ECORE_SMP_MB();
4894
4895         return ECORE_SUCCESS;
4896 }
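
/* Worked example (illustrative): hc_rate is in interrupts per second, so a
 * rate of 8000 yields hc_usec = 1000000 / 8000 = 125 microseconds between
 * coalesced status-block updates, while hc_rate == 0 yields hc_usec = 0.
 */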
4897
4898 static inline int ecore_q_send_setup_e1x(struct bxe_softc *sc,
4899                                         struct ecore_queue_state_params *params)
4900 {
4901         struct ecore_queue_sp_obj *o = params->q_obj;
4902         struct client_init_ramrod_data *rdata =
4903                 (struct client_init_ramrod_data *)o->rdata;
4904         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4905         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4906
4907         /* Clear the ramrod data */
4908         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4909
4910         /* Fill the ramrod data */
4911         ecore_q_fill_setup_data_cmn(sc, params, rdata);
4912
4913         /* No need for an explicit memory barrier here: ordering the
4914          * write of the SPQ element against the update of the SPQ
4915          * producer involves a memory read, and the full memory
4916          * barrier needed for that is already placed inside
4917          * ecore_sp_post().
4918          */
4919
4920         return ecore_sp_post(sc,
4921                              ramrod,
4922                              o->cids[ECORE_PRIMARY_CID_INDEX],
4923                              data_mapping,
4924                              ETH_CONNECTION_TYPE);
4925 }
4926
4927 static inline int ecore_q_send_setup_e2(struct bxe_softc *sc,
4928                                         struct ecore_queue_state_params *params)
4929 {
4930         struct ecore_queue_sp_obj *o = params->q_obj;
4931         struct client_init_ramrod_data *rdata =
4932                 (struct client_init_ramrod_data *)o->rdata;
4933         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4934         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4935
4936         /* Clear the ramrod data */
4937         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4938
4939         /* Fill the ramrod data */
4940         ecore_q_fill_setup_data_cmn(sc, params, rdata);
4941         ecore_q_fill_setup_data_e2(sc, params, rdata);
4942
4943         /* No need for an explicit memory barrier here: ordering the
4944          * write of the SPQ element against the update of the SPQ
4945          * producer involves a memory read, and the full memory
4946          * barrier needed for that is already placed inside
4947          * ecore_sp_post().
4948          */
4949
4950         return ecore_sp_post(sc,
4951                              ramrod,
4952                              o->cids[ECORE_PRIMARY_CID_INDEX],
4953                              data_mapping,
4954                              ETH_CONNECTION_TYPE);
4955 }
4956
4957 static inline int ecore_q_send_setup_tx_only(struct bxe_softc *sc,
4958                                   struct ecore_queue_state_params *params)
4959 {
4960         struct ecore_queue_sp_obj *o = params->q_obj;
4961         struct tx_queue_init_ramrod_data *rdata =
4962                 (struct tx_queue_init_ramrod_data *)o->rdata;
4963         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4964         int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4965         struct ecore_queue_setup_tx_only_params *tx_only_params =
4966                 &params->params.tx_only;
4967         uint8_t cid_index = tx_only_params->cid_index;
4968
4969         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
4970                 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4971                 ECORE_MSG(sc, "sending forward tx-only ramrod");
4972         }

4973         if (cid_index >= o->max_cos) {
4974                 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
4975                           o->cl_id, cid_index);
4976                 return ECORE_INVAL;
4977         }
4978
4979         ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d\n",
4980                   tx_only_params->gen_params.cos,
4981                   tx_only_params->gen_params.spcl_id);
4982
4983         /* Clear the ramrod data */
4984         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4985
4986         /* Fill the ramrod data */
4987         ecore_q_fill_setup_tx_only(sc, params, rdata);
4988
4989         ECORE_MSG(sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4990                   o->cids[cid_index], rdata->general.client_id,
4991                   rdata->general.sp_client_id, rdata->general.cos);
4992
4993         /* No need for an explicit memory barrier here: ordering the
4994          * write of the SPQ element against the update of the SPQ
4995          * producer involves a memory read, and the full memory
4996          * barrier needed for that is already placed inside
4997          * ecore_sp_post().
4998          */
4999
5000         return ecore_sp_post(sc, ramrod, o->cids[cid_index],
5001                              data_mapping, ETH_CONNECTION_TYPE);
5002 }
5003
5004 static void ecore_q_fill_update_data(struct bxe_softc *sc,
5005                                      struct ecore_queue_sp_obj *obj,
5006                                      struct ecore_queue_update_params *params,
5007                                      struct client_update_ramrod_data *data)
5008 {
5009         /* Client ID of the client to update */
5010         data->client_id = obj->cl_id;
5011
5012         /* Function ID of the client to update */
5013         data->func_id = obj->func_id;
5014
5015         /* Default VLAN value */
5016         data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
5017
5018         /* Inner VLAN stripping */
5019         data->inner_vlan_removal_enable_flg =
5020                 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM,
5021                                &params->update_flags);
5022         data->inner_vlan_removal_change_flg =
5023                 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
5024                        &params->update_flags);
5025
5026         /* Outer VLAN stripping */
5027         data->outer_vlan_removal_enable_flg =
5028                 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM,
5029                                &params->update_flags);
5030         data->outer_vlan_removal_change_flg =
5031                 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
5032                        &params->update_flags);
5033
5034         /* Drop packets that have source MAC that doesn't belong to this
5035          * Queue.
5036          */
5037         data->anti_spoofing_enable_flg =
5038                 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF,
5039                                &params->update_flags);
5040         data->anti_spoofing_change_flg =
5041                 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
5042                        &params->update_flags);
5043
5044         /* Activate/Deactivate */
5045         data->activate_flg =
5046                 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
5047         data->activate_change_flg =
5048                 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5049                                &params->update_flags);
5050
5051         /* Enable default VLAN */
5052         data->default_vlan_enable_flg =
5053                 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN,
5054                                &params->update_flags);
5055         data->default_vlan_change_flg =
5056                 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
5057                        &params->update_flags);
5058
5059         /* silent vlan removal */
5060         data->silent_vlan_change_flg =
5061                 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5062                                &params->update_flags);
5063         data->silent_vlan_removal_flg =
5064                 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
5065                                &params->update_flags);
5066         data->silent_vlan_value = ECORE_CPU_TO_LE16(params->silent_removal_value);
5067         data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
5068
5069         /* tx switching */
5070         data->tx_switching_flg =
5071                 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING,
5072                                &params->update_flags);
5073         data->tx_switching_change_flg =
5074                 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
5075                                &params->update_flags);
5076 }
5077
5078 static inline int ecore_q_send_update(struct bxe_softc *sc,
5079                                       struct ecore_queue_state_params *params)
5080 {
5081         struct ecore_queue_sp_obj *o = params->q_obj;
5082         struct client_update_ramrod_data *rdata =
5083                 (struct client_update_ramrod_data *)o->rdata;
5084         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5085         struct ecore_queue_update_params *update_params =
5086                 &params->params.update;
5087         uint8_t cid_index = update_params->cid_index;
5088
5089         if (cid_index >= o->max_cos) {
5090                 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5091                           o->cl_id, cid_index);
5092                 return ECORE_INVAL;
5093         }
5094
5095         /* Clear the ramrod data */
5096         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5097
5098         /* Fill the ramrod data */
5099         ecore_q_fill_update_data(sc, o, update_params, rdata);
5100
5101         /* No need for an explicit memory barrier here: ordering the
5102          * write of the SPQ element against the update of the SPQ
5103          * producer involves a memory read, and the full memory
5104          * barrier needed for that is already placed inside
5105          * ecore_sp_post().
5106          */
5107
5108         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5109                              o->cids[cid_index], data_mapping,
5110                              ETH_CONNECTION_TYPE);
5111 }
5112
5113 /**
5114  * ecore_q_send_deactivate - send DEACTIVATE command
5115  *
5116  * @sc:         device handle
5117  * @params:
5118  *
5119  * implemented using the UPDATE command.
5120  */
5121 static inline int ecore_q_send_deactivate(struct bxe_softc *sc,
5122                                         struct ecore_queue_state_params *params)
5123 {
5124         struct ecore_queue_update_params *update = &params->params.update;
5125
5126         ECORE_MEMSET(update, 0, sizeof(*update));
5127
5128         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5129
5130         return ecore_q_send_update(sc, params);
5131 }
5132
5133 /**
5134  * ecore_q_send_activate - send ACTIVATE command
5135  *
5136  * @sc:         device handle
5137  * @params:
5138  *
5139  * implemented using the UPDATE command.
5140  */
5141 static inline int ecore_q_send_activate(struct bxe_softc *sc,
5142                                         struct ecore_queue_state_params *params)
5143 {
5144         struct ecore_queue_update_params *update = &params->params.update;
5145
5146         ECORE_MEMSET(update, 0, sizeof(*update));
5147
5148         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
5149         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5150
5151         return ecore_q_send_update(sc, params);
5152 }
5153
5154 static inline int ecore_q_send_update_tpa(struct bxe_softc *sc,
5155                                         struct ecore_queue_state_params *params)
5156 {
5157         /* TODO: Not implemented yet. */
5158         return -1;
5159 }
5160
5161 static inline int ecore_q_send_halt(struct bxe_softc *sc,
5162                                     struct ecore_queue_state_params *params)
5163 {
5164         struct ecore_queue_sp_obj *o = params->q_obj;
5165
5166         /* Build eth_halt_ramrod_data.client_id in a big-endian friendly way:
5167          * the client ID is passed in place of the ramrod data DMA address. */
5168         ecore_dma_addr_t data_mapping = (ecore_dma_addr_t)o->cl_id;
5169
5170         return ecore_sp_post(sc,
5171                              RAMROD_CMD_ID_ETH_HALT,
5172                              o->cids[ECORE_PRIMARY_CID_INDEX],
5173                              data_mapping,
5174                              ETH_CONNECTION_TYPE);
5175 }
5176
5177 static inline int ecore_q_send_cfc_del(struct bxe_softc *sc,
5178                                        struct ecore_queue_state_params *params)
5179 {
5180         struct ecore_queue_sp_obj *o = params->q_obj;
5181         uint8_t cid_idx = params->params.cfc_del.cid_index;
5182
5183         if (cid_idx >= o->max_cos) {
5184                 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5185                           o->cl_id, cid_idx);
5186                 return ECORE_INVAL;
5187         }
5188
5189         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
5190                              o->cids[cid_idx], 0,
5191                              NONE_CONNECTION_TYPE);
5192 }
5193
5194 static inline int ecore_q_send_terminate(struct bxe_softc *sc,
5195                                         struct ecore_queue_state_params *params)
5196 {
5197         struct ecore_queue_sp_obj *o = params->q_obj;
5198         uint8_t cid_index = params->params.terminate.cid_index;
5199
5200         if (cid_index >= o->max_cos) {
5201                 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5202                           o->cl_id, cid_index);
5203                 return ECORE_INVAL;
5204         }
5205
5206         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
5207                              o->cids[cid_index], 0,
5208                              ETH_CONNECTION_TYPE);
5209 }
5210
5211 static inline int ecore_q_send_empty(struct bxe_softc *sc,
5212                                      struct ecore_queue_state_params *params)
5213 {
5214         struct ecore_queue_sp_obj *o = params->q_obj;
5215
5216         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
5217                              o->cids[ECORE_PRIMARY_CID_INDEX], 0,
5218                              ETH_CONNECTION_TYPE);
5219 }
5220
5221 static inline int ecore_queue_send_cmd_cmn(struct bxe_softc *sc,
5222                                         struct ecore_queue_state_params *params)
5223 {
5224         switch (params->cmd) {
5225         case ECORE_Q_CMD_INIT:
5226                 return ecore_q_init(sc, params);
5227         case ECORE_Q_CMD_SETUP_TX_ONLY:
5228                 return ecore_q_send_setup_tx_only(sc, params);
5229         case ECORE_Q_CMD_DEACTIVATE:
5230                 return ecore_q_send_deactivate(sc, params);
5231         case ECORE_Q_CMD_ACTIVATE:
5232                 return ecore_q_send_activate(sc, params);
5233         case ECORE_Q_CMD_UPDATE:
5234                 return ecore_q_send_update(sc, params);
5235         case ECORE_Q_CMD_UPDATE_TPA:
5236                 return ecore_q_send_update_tpa(sc, params);
5237         case ECORE_Q_CMD_HALT:
5238                 return ecore_q_send_halt(sc, params);
5239         case ECORE_Q_CMD_CFC_DEL:
5240                 return ecore_q_send_cfc_del(sc, params);
5241         case ECORE_Q_CMD_TERMINATE:
5242                 return ecore_q_send_terminate(sc, params);
5243         case ECORE_Q_CMD_EMPTY:
5244                 return ecore_q_send_empty(sc, params);
5245         default:
5246                 ECORE_ERR("Unknown command: %d\n", params->cmd);
5247                 return ECORE_INVAL;
5248         }
5249 }
5250
5251 static int ecore_queue_send_cmd_e1x(struct bxe_softc *sc,
5252                                     struct ecore_queue_state_params *params)
5253 {
5254         switch (params->cmd) {
5255         case ECORE_Q_CMD_SETUP:
5256                 return ecore_q_send_setup_e1x(sc, params);
5257         case ECORE_Q_CMD_INIT:
5258         case ECORE_Q_CMD_SETUP_TX_ONLY:
5259         case ECORE_Q_CMD_DEACTIVATE:
5260         case ECORE_Q_CMD_ACTIVATE:
5261         case ECORE_Q_CMD_UPDATE:
5262         case ECORE_Q_CMD_UPDATE_TPA:
5263         case ECORE_Q_CMD_HALT:
5264         case ECORE_Q_CMD_CFC_DEL:
5265         case ECORE_Q_CMD_TERMINATE:
5266         case ECORE_Q_CMD_EMPTY:
5267                 return ecore_queue_send_cmd_cmn(sc, params);
5268         default:
5269                 ECORE_ERR("Unknown command: %d\n", params->cmd);
5270                 return ECORE_INVAL;
5271         }
5272 }
5273
5274 static int ecore_queue_send_cmd_e2(struct bxe_softc *sc,
5275                                    struct ecore_queue_state_params *params)
5276 {
5277         switch (params->cmd) {
5278         case ECORE_Q_CMD_SETUP:
5279                 return ecore_q_send_setup_e2(sc, params);
5280         case ECORE_Q_CMD_INIT:
5281         case ECORE_Q_CMD_SETUP_TX_ONLY:
5282         case ECORE_Q_CMD_DEACTIVATE:
5283         case ECORE_Q_CMD_ACTIVATE:
5284         case ECORE_Q_CMD_UPDATE:
5285         case ECORE_Q_CMD_UPDATE_TPA:
5286         case ECORE_Q_CMD_HALT:
5287         case ECORE_Q_CMD_CFC_DEL:
5288         case ECORE_Q_CMD_TERMINATE:
5289         case ECORE_Q_CMD_EMPTY:
5290                 return ecore_queue_send_cmd_cmn(sc, params);
5291         default:
5292                 ECORE_ERR("Unknown command: %d\n", params->cmd);
5293                 return ECORE_INVAL;
5294         }
5295 }
5296
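/* Summary of the regular Queue state machine checked below (illustrative,
 * derived from the transitions coded in ecore_queue_chk_transition):
 *
 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *   ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *   TERMINATED --CFC_DEL--> RESET
 *   ACTIVE --SETUP_TX_ONLY--> MULTI_COS (tx-only connections added)
 */
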
5297 /**
5298  * ecore_queue_chk_transition - check state machine of a regular Queue
5299  *
5300  * @sc:         device handle
5301  * @o:
5302  * @params:
5303  *
5304  * (not Forwarding)
5305  * It both checks if the requested command is legal in a current
5306  * state and, if it's legal, sets a `next_state' in the object
5307  * that will be used in the completion flow to set the `state'
5308  * of the object.
5309  *
5310  * returns 0 if a requested command is a legal transition,
5311  *         ECORE_INVAL otherwise.
5312  */
5313 static int ecore_queue_chk_transition(struct bxe_softc *sc,
5314                                       struct ecore_queue_sp_obj *o,
5315                                       struct ecore_queue_state_params *params)
5316 {
5317         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5318         enum ecore_queue_cmd cmd = params->cmd;
5319         struct ecore_queue_update_params *update_params =
5320                  &params->params.update;
5321         uint8_t next_tx_only = o->num_tx_only;
5322
5323         /* Forget all commands pending completion if a driver-only state
5324          * transition has been requested.
5325          */
5326         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5327                 o->pending = 0;
5328                 o->next_state = ECORE_Q_STATE_MAX;
5329         }
5330
5331         /* Don't allow a next state transition if we are in the middle of
5332          * the previous one.
5333          */
5334         if (o->pending) {
5335                 ECORE_ERR("Blocking transition since pending was %lx\n",
5336                           o->pending);
5337                 return ECORE_BUSY;
5338         }
5339
5340         switch (state) {
5341         case ECORE_Q_STATE_RESET:
5342                 if (cmd == ECORE_Q_CMD_INIT)
5343                         next_state = ECORE_Q_STATE_INITIALIZED;
5344
5345                 break;
5346         case ECORE_Q_STATE_INITIALIZED:
5347                 if (cmd == ECORE_Q_CMD_SETUP) {
5348                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5349                                            &params->params.setup.flags))
5350                                 next_state = ECORE_Q_STATE_ACTIVE;
5351                         else
5352                                 next_state = ECORE_Q_STATE_INACTIVE;
5353                 }
5354
5355                 break;
5356         case ECORE_Q_STATE_ACTIVE:
5357                 if (cmd == ECORE_Q_CMD_DEACTIVATE)
5358                         next_state = ECORE_Q_STATE_INACTIVE;
5359
5360                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5361                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
5362                         next_state = ECORE_Q_STATE_ACTIVE;
5363
5364                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5365                         next_state = ECORE_Q_STATE_MULTI_COS;
5366                         next_tx_only = 1;
5367                 }
5368
5369                 else if (cmd == ECORE_Q_CMD_HALT)
5370                         next_state = ECORE_Q_STATE_STOPPED;
5371
5372                 else if (cmd == ECORE_Q_CMD_UPDATE) {
5373                         /* If "active" state change is requested, update the
5374                          * state accordingly.
5375                          */
5376                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5377                                            &update_params->update_flags) &&
5378                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5379                                             &update_params->update_flags))
5380                                 next_state = ECORE_Q_STATE_INACTIVE;
5381                         else
5382                                 next_state = ECORE_Q_STATE_ACTIVE;
5383                 }
5384
5385                 break;
5386         case ECORE_Q_STATE_MULTI_COS:
5387                 if (cmd == ECORE_Q_CMD_TERMINATE)
5388                         next_state = ECORE_Q_STATE_MCOS_TERMINATED;
5389
5390                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5391                         next_state = ECORE_Q_STATE_MULTI_COS;
5392                         next_tx_only = o->num_tx_only + 1;
5393                 }
5394
5395                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5396                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
5397                         next_state = ECORE_Q_STATE_MULTI_COS;
5398
5399                 else if (cmd == ECORE_Q_CMD_UPDATE) {
5400                         /* If "active" state change is requested, update the
5401          * state accordingly.
5402                          */
5403                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5404                                            &update_params->update_flags) &&
5405                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5406                                             &update_params->update_flags))
5407                                 next_state = ECORE_Q_STATE_INACTIVE;
5408                         else
5409                                 next_state = ECORE_Q_STATE_MULTI_COS;
5410                 }
5411
5412                 break;
5413         case ECORE_Q_STATE_MCOS_TERMINATED:
5414                 if (cmd == ECORE_Q_CMD_CFC_DEL) {
5415                         next_tx_only = o->num_tx_only - 1;
5416                         if (next_tx_only == 0)
5417                                 next_state = ECORE_Q_STATE_ACTIVE;
5418                         else
5419                                 next_state = ECORE_Q_STATE_MULTI_COS;
5420                 }
5421
5422                 break;
5423         case ECORE_Q_STATE_INACTIVE:
5424                 if (cmd == ECORE_Q_CMD_ACTIVATE)
5425                         next_state = ECORE_Q_STATE_ACTIVE;
5426
5427                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5428                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
5429                         next_state = ECORE_Q_STATE_INACTIVE;
5430
5431                 else if (cmd == ECORE_Q_CMD_HALT)
5432                         next_state = ECORE_Q_STATE_STOPPED;
5433
5434                 else if (cmd == ECORE_Q_CMD_UPDATE) {
5435                         /* If "active" state change is requested, update the
5436                          * state accordingly.
5437                          */
5438                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5439                                            &update_params->update_flags) &&
5440                             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5441                                            &update_params->update_flags)) {
5442                                 if (o->num_tx_only == 0)
5443                                         next_state = ECORE_Q_STATE_ACTIVE;
5444                                 else /* tx only queues exist for this queue */
5445                                         next_state = ECORE_Q_STATE_MULTI_COS;
5446                         } else
5447                                 next_state = ECORE_Q_STATE_INACTIVE;
5448                 }
5449
5450                 break;
5451         case ECORE_Q_STATE_STOPPED:
5452                 if (cmd == ECORE_Q_CMD_TERMINATE)
5453                         next_state = ECORE_Q_STATE_TERMINATED;
5454
5455                 break;
5456         case ECORE_Q_STATE_TERMINATED:
5457                 if (cmd == ECORE_Q_CMD_CFC_DEL)
5458                         next_state = ECORE_Q_STATE_RESET;
5459
5460                 break;
5461         default:
5462                 ECORE_ERR("Illegal state: %d\n", state);
5463         }
5464
5465         /* Transition is assured */
5466         if (next_state != ECORE_Q_STATE_MAX) {
5467                 ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5468                           state, cmd, next_state);
5469                 o->next_state = next_state;
5470                 o->next_tx_only = next_tx_only;
5471                 return ECORE_SUCCESS;
5472         }
5473
5474         ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5475
5476         return ECORE_INVAL;
5477 }
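
/* Illustrative sketch (not compiled): how a caller drives the state machine
 * validated above. Requesting ECORE_Q_CMD_UPDATE with ACTIVATE_CHNG set and
 * ACTIVATE clear takes an ACTIVE queue to INACTIVE. This assumes the
 * ecore_queue_state_change() entry point defined earlier in this file.
 */
#if 0
static int example_queue_deactivate(struct bxe_softc *sc,
                                    struct ecore_queue_sp_obj *q_obj)
{
        struct ecore_queue_state_params qparams;

        ECORE_MEMSET(&qparams, 0, sizeof(qparams));
        qparams.q_obj = q_obj;
        qparams.cmd   = ECORE_Q_CMD_UPDATE;

        /* Request an "active" change with the new value left clear */
        ECORE_SET_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
                      &qparams.params.update.update_flags);

        /* Block until the ramrod completes */
        ECORE_SET_BIT(RAMROD_COMP_WAIT, &qparams.ramrod_flags);

        return ecore_queue_state_change(sc, &qparams);
}
#endif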
5478
5479 /**
5480  * ecore_queue_chk_fwd_transition - check the state machine of a Forwarding Queue.
5481  *
5482  * @sc:         device handle
5483  * @o:          queue state object
5484  * @params:     queue state parameters
5485  *
5486  * It both checks if the requested command is legal in the current
5487  * state and, if it's legal, sets a `next_state' in the object
5488  * that will be used in the completion flow to set the `state'
5489  * of the object.
5490  *
5491  * returns 0 if a requested command is a legal transition,
5492  *         ECORE_INVAL otherwise.
5493  */
5494 static int ecore_queue_chk_fwd_transition(struct bxe_softc *sc,
5495                                           struct ecore_queue_sp_obj *o,
5496                                         struct ecore_queue_state_params *params)
5497 {
5498         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5499         enum ecore_queue_cmd cmd = params->cmd;
5500
5501         switch (state) {
5502         case ECORE_Q_STATE_RESET:
5503                 if (cmd == ECORE_Q_CMD_INIT)
5504                         next_state = ECORE_Q_STATE_INITIALIZED;
5505
5506                 break;
5507         case ECORE_Q_STATE_INITIALIZED:
5508                 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5509                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5510                                            &params->params.tx_only.flags))
5511                                 next_state = ECORE_Q_STATE_ACTIVE;
5512                         else
5513                                 next_state = ECORE_Q_STATE_INACTIVE;
5514                 }
5515
5516                 break;
5517         case ECORE_Q_STATE_ACTIVE:
5518         case ECORE_Q_STATE_INACTIVE:
5519                 if (cmd == ECORE_Q_CMD_CFC_DEL)
5520                         next_state = ECORE_Q_STATE_RESET;
5521
5522                 break;
5523         default:
5524                 ECORE_ERR("Illegal state: %d\n", state);
5525         }
5526
5527         /* Transition is assured */
5528         if (next_state != ECORE_Q_STATE_MAX) {
5529                 ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5530                           state, cmd, next_state);
5531                 o->next_state = next_state;
5532                 return ECORE_SUCCESS;
5533         }
5534
5535         ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5536         return ECORE_INVAL;
5537 }
5538
5539 void ecore_init_queue_obj(struct bxe_softc *sc,
5540                           struct ecore_queue_sp_obj *obj,
5541                           uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt, uint8_t func_id,
5542                           void *rdata,
5543                           ecore_dma_addr_t rdata_mapping, unsigned long type)
5544 {
5545         ECORE_MEMSET(obj, 0, sizeof(*obj));
5546
5547         /* We support at most ECORE_MULTI_TX_COS Tx CoS values at the moment */
5548         ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
5549
5550         memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5551         obj->max_cos = cid_cnt;
5552         obj->cl_id = cl_id;
5553         obj->func_id = func_id;
5554         obj->rdata = rdata;
5555         obj->rdata_mapping = rdata_mapping;
5556         obj->type = type;
5557         obj->next_state = ECORE_Q_STATE_MAX;
5558
5559         if (CHIP_IS_E1x(sc))
5560                 obj->send_cmd = ecore_queue_send_cmd_e1x;
5561         else
5562                 obj->send_cmd = ecore_queue_send_cmd_e2;
5563
5564         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
5565                 obj->check_transition = ecore_queue_chk_fwd_transition;
5566         else
5567                 obj->check_transition = ecore_queue_chk_transition;
5568
5569         obj->complete_cmd = ecore_queue_comp_cmd;
5570         obj->wait_comp = ecore_queue_wait_comp;
5571         obj->set_pending = ecore_queue_set_pending;
5572 }
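
/* Illustrative sketch (not compiled): a client typically embeds the queue
 * object in its per-queue state and initializes it once at setup time. The
 * names fp, q_obj and q_type below are hypothetical placeholders; q_type
 * carries the queue type flags (e.g. ECORE_Q_TYPE_FWD for a forwarding
 * queue, as tested above).
 */
#if 0
        uint32_t cids[ECORE_MULTI_TX_COS];
        unsigned long q_type = 0;

        cids[0] = fp->cid;      /* leading connection only: cid_cnt == 1 */

        ecore_init_queue_obj(sc, &fp->q_obj, fp->cl_id, cids, 1,
                             ECORE_FUNC_ID(sc), fp->rdata, fp->rdata_mapping,
                             q_type);
#endif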
5573
5574 /* Return a queue object's logical state. */
5575 int ecore_get_q_logical_state(struct bxe_softc *sc,
5576                                struct ecore_queue_sp_obj *obj)
5577 {
5578         switch (obj->state) {
5579         case ECORE_Q_STATE_ACTIVE:
5580         case ECORE_Q_STATE_MULTI_COS:
5581                 return ECORE_Q_LOGICAL_STATE_ACTIVE;
5582         case ECORE_Q_STATE_RESET:
5583         case ECORE_Q_STATE_INITIALIZED:
5584         case ECORE_Q_STATE_MCOS_TERMINATED:
5585         case ECORE_Q_STATE_INACTIVE:
5586         case ECORE_Q_STATE_STOPPED:
5587         case ECORE_Q_STATE_TERMINATED:
5588         case ECORE_Q_STATE_FLRED:
5589                 return ECORE_Q_LOGICAL_STATE_STOPPED;
5590         default:
5591                 return ECORE_INVAL;
5592         }
5593 }
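
/* Illustrative sketch (not compiled): callers can use the logical state to
 * decide whether a queue may still accept run-time updates, e.g.:
 *
 *      if (ecore_get_q_logical_state(sc, q_obj) ==
 *          ECORE_Q_LOGICAL_STATE_ACTIVE)
 *              rc = ecore_queue_state_change(sc, &qparams);
 */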
5594
5595 /********************** Function state object *********************************/
5596 enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc,
5597                                            struct ecore_func_sp_obj *o)
5598 {
5599         /* In the middle of a transaction - return an invalid state */
5600         if (o->pending)
5601                 return ECORE_F_STATE_MAX;
5602
5603         /* Ensure the order of reading o->pending and o->state:
5604          * o->pending should be read first.
5605          */
5606         rmb();
5607
5608         return o->state;
5609 }
5610
5611 static int ecore_func_wait_comp(struct bxe_softc *sc,
5612                                 struct ecore_func_sp_obj *o,
5613                                 enum ecore_func_cmd cmd)
5614 {
5615         return ecore_state_wait(sc, cmd, &o->pending);
5616 }
5617
5618 /**
5619  * ecore_func_state_change_comp - complete the state machine transition
5620  *
5621  * @sc:         device handle
5622  * @o:          function state object
5623  * @cmd:        command that has completed
5624  *
5625  * Called on state change transition. Completes the state
5626  * machine transition only - no HW interaction.
5627  */
5628 static inline int ecore_func_state_change_comp(struct bxe_softc *sc,
5629                                                struct ecore_func_sp_obj *o,
5630                                                enum ecore_func_cmd cmd)
5631 {
5632         unsigned long cur_pending = o->pending;
5633
5634         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
5635                 ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5636                           cmd, ECORE_FUNC_ID(sc), o->state,
5637                           cur_pending, o->next_state);
5638                 return ECORE_INVAL;
5639         }
5640
5641         ECORE_MSG(sc,
5642                   "Completing command %d for func %d, setting state to %d\n",
5643                   cmd, ECORE_FUNC_ID(sc), o->next_state);
5644
5645         o->state = o->next_state;
5646         o->next_state = ECORE_F_STATE_MAX;
5647
5648         /* It's important that o->state and o->next_state are
5649          * updated before o->pending.
5650          */
5651         wmb();
5652
5653         ECORE_CLEAR_BIT(cmd, &o->pending);
5654         ECORE_SMP_MB_AFTER_CLEAR_BIT();
5655
5656         return ECORE_SUCCESS;
5657 }
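
/* Note: the wmb() above pairs with the rmb() in ecore_func_get_state():
 * a reader that observes o->pending cleared is then guaranteed to read
 * the already-updated o->state.
 */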
5658
5659 /**
5660  * ecore_func_comp_cmd - complete the state change command
5661  *
5662  * @sc:         device handle
5663  * @o:          function state object
5664  * @cmd:        command to check the completion for
5665  *
5666  * Checks that the arrived completion is expected.
5667  */
5668 static int ecore_func_comp_cmd(struct bxe_softc *sc,
5669                                struct ecore_func_sp_obj *o,
5670                                enum ecore_func_cmd cmd)
5671 {
5672         /* Complete the state machine part first, check if it's a
5673          * legal completion.
5674          */
5675         int rc = ecore_func_state_change_comp(sc, o, cmd);
5676         return rc;
5677 }
5678
5679 /**
5680  * ecore_func_chk_transition - check the function state machine transition
5681  *
5682  * @sc:         device handle
5683  * @o:          function state object
5684  * @params:     function state parameters
5685  *
5686  * It both checks if the requested command is legal in the current
5687  * state and, if it's legal, sets a `next_state' in the object
5688  * that will be used in the completion flow to set the `state'
5689  * of the object.
5690  *
5691  * returns 0 if a requested command is a legal transition,
5692  *         ECORE_INVAL otherwise.
5693  */
5694 static int ecore_func_chk_transition(struct bxe_softc *sc,
5695                                      struct ecore_func_sp_obj *o,
5696                                      struct ecore_func_state_params *params)
5697 {
5698         enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
5699         enum ecore_func_cmd cmd = params->cmd;
5700
5701         /* Forget all pending-for-completion commands if a driver-only state
5702          * transition has been requested.
5703          */
5704         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5705                 o->pending = 0;
5706                 o->next_state = ECORE_F_STATE_MAX;
5707         }
5708
5709         /* Don't allow a next state transition if we are in the middle of
5710          * the previous one.
5711          */
5712         if (o->pending)
5713                 return ECORE_BUSY;
5714
5715         switch (state) {
5716         case ECORE_F_STATE_RESET:
5717                 if (cmd == ECORE_F_CMD_HW_INIT)
5718                         next_state = ECORE_F_STATE_INITIALIZED;
5719
5720                 break;
5721         case ECORE_F_STATE_INITIALIZED:
5722                 if (cmd == ECORE_F_CMD_START)
5723                         next_state = ECORE_F_STATE_STARTED;
5724
5725                 else if (cmd == ECORE_F_CMD_HW_RESET)
5726                         next_state = ECORE_F_STATE_RESET;
5727
5728                 break;
5729         case ECORE_F_STATE_STARTED:
5730                 if (cmd == ECORE_F_CMD_STOP)
5731                         next_state = ECORE_F_STATE_INITIALIZED;
5732                 /* AFEX ramrods can be sent only in the STARTED state, and
5733                  * only if a FUNCTION_STOP ramrod completion is not pending;
5734                  * for these events the next state remains STARTED.
5735                  */
5736                 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
5737                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5738                         next_state = ECORE_F_STATE_STARTED;
5739
5740                 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
5741                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5742                         next_state = ECORE_F_STATE_STARTED;
5743
5744                 /* The switch_update ramrod can be sent in either the STARTED
5745                  * or TX_STOPPED state, and it doesn't change the state.
5746                  */
5747                 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
5748                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5749                         next_state = ECORE_F_STATE_STARTED;
5750
5751                 else if (cmd == ECORE_F_CMD_TX_STOP)
5752                         next_state = ECORE_F_STATE_TX_STOPPED;
5753
5754                 break;
5755         case ECORE_F_STATE_TX_STOPPED:
5756                 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
5757                     (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5758                         next_state = ECORE_F_STATE_TX_STOPPED;
5759
5760                 else if (cmd == ECORE_F_CMD_TX_START)
5761                         next_state = ECORE_F_STATE_STARTED;
5762
5763                 break;
5764         default:
5765                 ECORE_ERR("Unknown state: %d\n", state);
5766         }
5767
5768         /* Transition is assured */
5769         if (next_state != ECORE_F_STATE_MAX) {
5770                 ECORE_MSG(sc, "Good function state transition: %d(%d)->%d\n",
5771                           state, cmd, next_state);
5772                 o->next_state = next_state;
5773                 return ECORE_SUCCESS;
5774         }
5775
5776         ECORE_MSG(sc, "Bad function state transition request: %d %d\n",
5777                   state, cmd);
5778
5779         return ECORE_INVAL;
5780 }
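
/* Illustrative sketch (not compiled): a legal round trip through TX_STOPPED
 * as validated by the checker above (used, e.g., when reconfiguring DCB).
 * Error handling is elided and sc->func_obj is a hypothetical softc member;
 * a real caller also fills fparams.params.tx_start before TX_START.
 */
#if 0
static void example_tx_stop_start(struct bxe_softc *sc)
{
        struct ecore_func_state_params fparams;

        ECORE_MEMSET(&fparams, 0, sizeof(fparams));
        fparams.f_obj = &sc->func_obj;
        ECORE_SET_BIT(RAMROD_COMP_WAIT, &fparams.ramrod_flags);

        fparams.cmd = ECORE_F_CMD_TX_STOP;      /* STARTED -> TX_STOPPED */
        ecore_func_state_change(sc, &fparams);

        fparams.cmd = ECORE_F_CMD_TX_START;     /* TX_STOPPED -> STARTED */
        ecore_func_state_change(sc, &fparams);
}
#endif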
5781
5782 /**
5783  * ecore_func_init_func - performs HW init at function stage
5784  *
5785  * @sc:         device handle
5786  * @drv:        driver-specific operations object
5787  *
5788  * Init HW when the current phase is
5789  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5790  * HW blocks.
5791  */
5792 static inline int ecore_func_init_func(struct bxe_softc *sc,
5793                                        const struct ecore_func_sp_drv_ops *drv)
5794 {
5795         return drv->init_hw_func(sc);
5796 }
5797
5798 /**
5799  * ecore_func_init_port - performs HW init at port stage
5800  *
5801  * @sc:         device handle
5802  * @drv:        driver-specific operations object
5803  *
5804  * Init HW when the current phase is
5805  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5806  * FUNCTION-only HW blocks.
5807  *
5808  */
5809 static inline int ecore_func_init_port(struct bxe_softc *sc,
5810                                        const struct ecore_func_sp_drv_ops *drv)
5811 {
5812         int rc = drv->init_hw_port(sc);
5813         if (rc)
5814                 return rc;
5815
5816         return ecore_func_init_func(sc, drv);
5817 }
5818
5819 /**
5820  * ecore_func_init_cmn_chip - performs HW init at chip-common stage
5821  *
5822  * @sc:         device handle
5823  * @drv:        driver-specific operations object
5824  *
5825  * Init HW when the current phase is
5826  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5827  * PORT-only and FUNCTION-only HW blocks.
5828  */
5829 static inline int ecore_func_init_cmn_chip(struct bxe_softc *sc,
5830                                         const struct ecore_func_sp_drv_ops *drv)
5831 {
5832         int rc = drv->init_hw_cmn_chip(sc);
5833         if (rc)
5834                 return rc;
5835
5836         return ecore_func_init_port(sc, drv);
5837 }
5838
5839 /**
5840  * ecore_func_init_cmn - performs HW init at common stage
5841  *
5842  * @sc:         device handle
5843  * @drv:        driver-specific operations object
5844  *
5845  * Init HW when the current phase is
5846  * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5847  * PORT-only and FUNCTION-only HW blocks.
5848  */
5849 static inline int ecore_func_init_cmn(struct bxe_softc *sc,
5850                                       const struct ecore_func_sp_drv_ops *drv)
5851 {
5852         int rc = drv->init_hw_cmn(sc);
5853         if (rc)
5854                 return rc;
5855
5856         return ecore_func_init_port(sc, drv);
5857 }
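
/* The init helpers above nest: a COMMON or COMMON_CHIP init falls through
 * to the PORT init, which in turn falls through to the FUNCTION init, so
 * each load phase initializes its own HW blocks plus everything below it.
 */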
5858
5859 static int ecore_func_hw_init(struct bxe_softc *sc,
5860                               struct ecore_func_state_params *params)
5861 {
5862         uint32_t load_code = params->params.hw_init.load_phase;
5863         struct ecore_func_sp_obj *o = params->f_obj;
5864         const struct ecore_func_sp_drv_ops *drv = o->drv;
5865         int rc = 0;
5866
5867         ECORE_MSG(sc, "function %d  load_code %x\n",
5868                   ECORE_ABS_FUNC_ID(sc), load_code);
5869
5870         /* Prepare buffers for unzipping the FW */
5871         rc = drv->gunzip_init(sc);
5872         if (rc)
5873                 return rc;
5874
5875         /* Prepare FW */
5876         rc = drv->init_fw(sc);
5877         if (rc) {
5878                 ECORE_ERR("Error loading firmware\n");
5879                 goto init_err;
5880         }
5881
5882         /* Handle the beginning of COMMON_XXX phases separately... */
5883         switch (load_code) {
5884         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5885                 rc = ecore_func_init_cmn_chip(sc, drv);
5886                 if (rc)
5887                         goto init_err;
5888
5889                 break;
5890         case FW_MSG_CODE_DRV_LOAD_COMMON:
5891                 rc = ecore_func_init_cmn(sc, drv);
5892                 if (rc)
5893                         goto init_err;
5894
5895                 break;
5896         case FW_MSG_CODE_DRV_LOAD_PORT:
5897                 rc = ecore_func_init_port(sc, drv);
5898                 if (rc)
5899                         goto init_err;
5900
5901                 break;
5902         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5903                 rc = ecore_func_init_func(sc, drv);
5904                 if (rc)
5905                         goto init_err;
5906
5907                 break;
5908         default:
5909                 ECORE_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5910                 rc = ECORE_INVAL;
5911         }
5912
5913 init_err:
5914         drv->gunzip_end(sc);
5915
5916         /* In case of success, complete the command immediately: no ramrods
5917          * have been sent.
5918          */
5919         if (!rc)
5920                 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
5921
5922         return rc;
5923 }
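
/* Illustrative sketch (not compiled): the load path requests HW init through
 * the generic state-change entry point, passing the load phase granted by
 * the MCP. `load_code' and sc->func_obj are hypothetical caller-side names.
 */
#if 0
static int example_hw_init(struct bxe_softc *sc, uint32_t load_code)
{
        struct ecore_func_state_params fparams;

        ECORE_MEMSET(&fparams, 0, sizeof(fparams));
        fparams.f_obj = &sc->func_obj;
        fparams.cmd = ECORE_F_CMD_HW_INIT;
        fparams.params.hw_init.load_phase = load_code;
        ECORE_SET_BIT(RAMROD_COMP_WAIT, &fparams.ramrod_flags);

        return ecore_func_state_change(sc, &fparams);
}
#endif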
5924
5925 /**
5926  * ecore_func_reset_func - reset HW at function stage
5927  *
5928  * @sc:         device handle
5929  * @drv:
5930  *
5931  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5932  * FUNCTION-only HW blocks.
5933  */
5934 static inline void ecore_func_reset_func(struct bxe_softc *sc,
5935                                         const struct ecore_func_sp_drv_ops *drv)
5936 {
5937         drv->reset_hw_func(sc);
5938 }
5939
5940 /**
5941  * ecore_func_reset_port - reset HW at port stage
5942  *
5943  * @sc:         device handle
5944  * @drv:        driver-specific operations object
5945  *
5946  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5947  * FUNCTION-only and PORT-only HW blocks.
5948  *
5949  *                 !!!IMPORTANT!!!
5950  *
5951  * It's important to call reset_port() before reset_func() as the last thing
5952  * reset_func() does is pf_disable(), thus disabling PGLUE_B, which
5953  * makes any DMAE transactions impossible.
5954  */
5955 static inline void ecore_func_reset_port(struct bxe_softc *sc,
5956                                         const struct ecore_func_sp_drv_ops *drv)
5957 {
5958         drv->reset_hw_port(sc);
5959         ecore_func_reset_func(sc, drv);
5960 }
5961
5962 /**
5963  * ecore_func_reset_cmn - reset HW at common stage
5964  *
5965  * @sc:         device handle
5966  * @drv:        driver-specific operations object
5967  *
5968  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5969  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5970  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5971  */
5972 static inline void ecore_func_reset_cmn(struct bxe_softc *sc,
5973                                         const struct ecore_func_sp_drv_ops *drv)
5974 {
5975         ecore_func_reset_port(sc, drv);
5976         drv->reset_hw_cmn(sc);
5977 }
5978
5979 static inline int ecore_func_hw_reset(struct bxe_softc *sc,
5980                                       struct ecore_func_state_params *params)
5981 {
5982         uint32_t reset_phase = params->params.hw_reset.reset_phase;
5983         struct ecore_func_sp_obj *o = params->f_obj;
5984         const struct ecore_func_sp_drv_ops *drv = o->drv;
5985
5986         ECORE_MSG(sc, "function %d  reset_phase %x\n", ECORE_ABS_FUNC_ID(sc),
5987                   reset_phase);
5988
5989         switch (reset_phase) {
5990         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5991                 ecore_func_reset_cmn(sc, drv);
5992                 break;
5993         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5994                 ecore_func_reset_port(sc, drv);
5995                 break;
5996         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5997                 ecore_func_reset_func(sc, drv);
5998                 break;
5999         default:
6000                 ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n",
6001                           reset_phase);
6002                 break;
6003         }
6004
6005         /* Complete the command immediately: no ramrods have been sent. */
6006         o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
6007
6008         return ECORE_SUCCESS;
6009 }
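
/* Illustrative sketch (not compiled): the unload path mirrors the load path;
 * the reset phase granted by the MCP (`reset_code' here is a hypothetical
 * caller-side name) selects how much of the chip is torn down.
 */
#if 0
        fparams.cmd = ECORE_F_CMD_HW_RESET;
        fparams.params.hw_reset.reset_phase = reset_code;
        rc = ecore_func_state_change(sc, &fparams);
#endif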
6010
6011 static inline int ecore_func_send_start(struct bxe_softc *sc,
6012                                         struct ecore_func_state_params *params)
6013 {
6014         struct ecore_func_sp_obj *o = params->f_obj;
6015         struct function_start_data *rdata =
6016                 (struct function_start_data *)o->rdata;
6017         ecore_dma_addr_t data_mapping = o->rdata_mapping;
6018         struct ecore_func_start_params *start_params = &params->params.start;
6019
6020         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6021
6022         /* Fill the ramrod data with provided parameters */
6023         rdata->function_mode    = (uint8_t)start_params->mf_mode;
6024         rdata->sd_vlan_tag      = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
6025         rdata->path_id          = ECORE_PATH_ID(sc);
6026         rdata->network_cos_mode = start_params->network_cos_mode;
6027         rdata->gre_tunnel_mode  = start_params->gre_tunnel_mode;
6028         rdata->gre_tunnel_rss   = start_params->gre_tunnel_rss;
6029
6030         /*
6031          * No need for an explicit memory barrier here: the ordering of
6032          * writing to the SPQ element and updating the SPQ producer is
6033          * guaranteed by the full memory barrier inside ecore_sp_post().
6034          */
6037
6038         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
6039                              data_mapping, NONE_CONNECTION_TYPE);
6040 }
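
/* Illustrative sketch (not compiled): filling the start parameters consumed
 * by the ramrod above. Field values are arbitrary examples and sc->func_obj
 * is a hypothetical softc member.
 */
#if 0
static int example_func_start(struct bxe_softc *sc)
{
        struct ecore_func_state_params fparams;
        struct ecore_func_start_params *start = &fparams.params.start;

        ECORE_MEMSET(&fparams, 0, sizeof(fparams));
        fparams.f_obj = &sc->func_obj;
        fparams.cmd = ECORE_F_CMD_START;

        start->mf_mode          = 0;    /* non-MF example value */
        start->sd_vlan_tag      = 0;
        start->network_cos_mode = 0;
        start->gre_tunnel_mode  = 0;
        start->gre_tunnel_rss   = 0;

        ECORE_SET_BIT(RAMROD_COMP_WAIT, &fparams.ramrod_flags);
        return ecore_func_state_change(sc, &fparams);
}
#endif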
6041
6042 static inline int ecore_func_send_switch_update(struct bxe_softc *sc,
6043                                         struct ecore_func_state_params *params)
6044 {
6045         struct ecore_func_sp_obj *o = params->f_obj;
6046         struct function_update_data *rdata =
6047                 (struct function_update_data *)o->rdata;
6048         ecore_dma_addr_t data_mapping = o->rdata_mapping;
6049         struct ecore_func_switch_update_params *switch_update_params =
6050                 &params->params.switch_update;
6051
6052         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6053
6054         /* Fill the ramrod data with provided parameters */
6055         rdata->tx_switch_suspend_change_flg = 1;
6056         rdata->tx_switch_suspend = switch_update_params->suspend;
6057         rdata->echo = SWITCH_UPDATE;
6058
6059         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6060                              data_mapping, NONE_CONNECTION_TYPE);
6061 }
6062
6063 static inline int ecore_func_send_afex_update(struct bxe_softc *sc,
6064                                          struct ecore_func_state_params *params)
6065 {
6066         struct ecore_func_sp_obj *o = params->f_obj;
6067         struct function_update_data *rdata =
6068                 (struct function_update_data *)o->afex_rdata;
6069         ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
6070         struct ecore_func_afex_update_params *afex_update_params =
6071                 &params->params.afex_update;
6072
6073         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6074
6075         /* Fill the ramrod data with provided parameters */
6076         rdata->vif_id_change_flg = 1;
6077         rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
6078         rdata->afex_default_vlan_change_flg = 1;
6079         rdata->afex_default_vlan =
6080                 ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
6081         rdata->allowed_priorities_change_flg = 1;
6082         rdata->allowed_priorities = afex_update_params->allowed_priorities;
6083         rdata->echo = AFEX_UPDATE;
6084
6085         /*
6086          * No need for an explicit memory barrier here: the ordering of
6087          * writing to the SPQ element and updating the SPQ producer is
6088          * guaranteed by the full memory barrier inside ecore_sp_post().
6089          */
6091         ECORE_MSG(sc,
6092                   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
6093                   rdata->vif_id,
6094                   rdata->afex_default_vlan, rdata->allowed_priorities);
6095
6096         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6097                              data_mapping, NONE_CONNECTION_TYPE);
6098 }
6099
6100 static inline int ecore_func_send_afex_viflists(struct bxe_softc *sc,
6102                                          struct ecore_func_state_params *params)
6103 {
6104         struct ecore_func_sp_obj *o = params->f_obj;
6105         struct afex_vif_list_ramrod_data *rdata =
6106                 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
6107         struct ecore_func_afex_viflists_params *afex_vif_params =
6108                 &params->params.afex_viflists;
6109         uint64_t *p_rdata = (uint64_t *)rdata;
6110
6111         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6112
6113         /* Fill the ramrod data with provided parameters */
6114         rdata->vif_list_index = ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
6115         rdata->func_bit_map          = afex_vif_params->func_bit_map;
6116         rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
6117         rdata->func_to_clear         = afex_vif_params->func_to_clear;
6118
6119         /* Send the sub-command type in the echo field */
6120         rdata->echo = afex_vif_params->afex_vif_list_command;
6121
6122         /*
6123          * No need for an explicit memory barrier here: the ordering of
6124          * writing to the SPQ element and updating the SPQ producer is
6125          * guaranteed by the full memory barrier inside ecore_sp_post().
6126          */
6128
6129         ECORE_MSG(sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
6130                   rdata->afex_vif_list_command, rdata->vif_list_index,
6131                   rdata->func_bit_map, rdata->func_to_clear);
6132
6133         /* this ramrod sends data directly and not through DMA mapping */
6134         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
6135                              *p_rdata, NONE_CONNECTION_TYPE);
6136 }
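
/* Unlike the other function ramrods, the VIF-list data fits in a single
 * 64-bit word, so it is passed by value in the SPQ element (*p_rdata above)
 * rather than through a DMA address.
 */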
6137
6138 static inline int ecore_func_send_stop(struct bxe_softc *sc,
6139                                        struct ecore_func_state_params *params)
6140 {
6141         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
6142                              NONE_CONNECTION_TYPE);
6143 }
6144
6145 static inline int ecore_func_send_tx_stop(struct bxe_softc *sc,
6146                                        struct ecore_func_state_params *params)
6147 {
6148         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
6149                              NONE_CONNECTION_TYPE);
6150 }

6151 static inline int ecore_func_send_tx_start(struct bxe_softc *sc,
6152                                        struct ecore_func_state_params *params)
6153 {
6154         struct ecore_func_sp_obj *o = params->f_obj;
6155         struct flow_control_configuration *rdata =
6156                 (struct flow_control_configuration *)o->rdata;
6157         ecore_dma_addr_t data_mapping = o->rdata_mapping;
6158         struct ecore_func_tx_start_params *tx_start_params =
6159                 &params->params.tx_start;
6160         int i;
6161
6162         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6163
6164         rdata->dcb_enabled = tx_start_params->dcb_enabled;
6165         rdata->dcb_version = tx_start_params->dcb_version;
6166         rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
6167
6168         for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6169                 rdata->traffic_type_to_priority_cos[i] =
6170                         tx_start_params->traffic_type_to_priority_cos[i];
6171
6172         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6173                              data_mapping, NONE_CONNECTION_TYPE);
6174 }
6175
6176 static int ecore_func_send_cmd(struct bxe_softc *sc,
6177                                struct ecore_func_state_params *params)
6178 {
6179         switch (params->cmd) {
6180         case ECORE_F_CMD_HW_INIT:
6181                 return ecore_func_hw_init(sc, params);
6182         case ECORE_F_CMD_START:
6183                 return ecore_func_send_start(sc, params);
6184         case ECORE_F_CMD_STOP:
6185                 return ecore_func_send_stop(sc, params);
6186         case ECORE_F_CMD_HW_RESET:
6187                 return ecore_func_hw_reset(sc, params);
6188         case ECORE_F_CMD_AFEX_UPDATE:
6189                 return ecore_func_send_afex_update(sc, params);
6190         case ECORE_F_CMD_AFEX_VIFLISTS:
6191                 return ecore_func_send_afex_viflists(sc, params);
6192         case ECORE_F_CMD_TX_STOP:
6193                 return ecore_func_send_tx_stop(sc, params);
6194         case ECORE_F_CMD_TX_START:
6195                 return ecore_func_send_tx_start(sc, params);
6196         case ECORE_F_CMD_SWITCH_UPDATE:
6197                 return ecore_func_send_switch_update(sc, params);
6198         default:
6199                 ECORE_ERR("Unknown command: %d\n", params->cmd);
6200                 return ECORE_INVAL;
6201         }
6202 }
6203
6204 void ecore_init_func_obj(struct bxe_softc *sc,
6205                          struct ecore_func_sp_obj *obj,
6206                          void *rdata, ecore_dma_addr_t rdata_mapping,
6207                          void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
6208                          struct ecore_func_sp_drv_ops *drv_iface)
6209 {
6210         ECORE_MEMSET(obj, 0, sizeof(*obj));
6211
6212         ECORE_MUTEX_INIT(&obj->one_pending_mutex);
6213
6214         obj->rdata = rdata;
6215         obj->rdata_mapping = rdata_mapping;
6216         obj->afex_rdata = afex_rdata;
6217         obj->afex_rdata_mapping = afex_rdata_mapping;
6218         obj->send_cmd = ecore_func_send_cmd;
6219         obj->check_transition = ecore_func_chk_transition;
6220         obj->complete_cmd = ecore_func_comp_cmd;
6221         obj->wait_comp = ecore_func_wait_comp;
6222         obj->drv = drv_iface;
6223 }
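
/* Illustrative sketch (not compiled): the driver initializes one function
 * object at attach time, pointing it at DMA-safe ramrod buffers. The names
 * below (sc->func_obj, sp->func_rdata, bxe_func_sp_drv, ...) are
 * hypothetical placeholders.
 */
#if 0
        ecore_init_func_obj(sc, &sc->func_obj,
                            sp->func_rdata, sp->func_rdata_mapping,
                            sp->func_afex_rdata, sp->func_afex_rdata_mapping,
                            &bxe_func_sp_drv);
#endif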
6224
6225 /**
6226  * ecore_func_state_change - perform Function state change transition
6227  *
6228  * @sc:         device handle
6229  * @params:     parameters to perform the transaction
6230  *
6231  * returns 0 in case of a successfully completed transition,
6232  *         a negative error code in case of failure, or a positive
6233  *         (EBUSY) value if there is a completion that is
6234  *         still pending (possible only if RAMROD_COMP_WAIT is
6235  *         not set in params->ramrod_flags for asynchronous
6236  *         commands).
6237  */
6238 int ecore_func_state_change(struct bxe_softc *sc,
6239                             struct ecore_func_state_params *params)
6240 {
6241         struct ecore_func_sp_obj *o = params->f_obj;
6242         int rc, cnt = 300;
6243         enum ecore_func_cmd cmd = params->cmd;
6244         unsigned long *pending = &o->pending;
6245
6246         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6247
6248         /* Check that the requested transition is legal */
6249         rc = o->check_transition(sc, o, params);
6250         if ((rc == ECORE_BUSY) &&
6251             (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
6252                 while ((rc == ECORE_BUSY) && (--cnt > 0)) {
6253                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6254                         ECORE_MSLEEP(10);
6255                         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6256                         rc = o->check_transition(sc, o, params);
6257                 }
6258                 if (rc == ECORE_BUSY) {
6259                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6260                         ECORE_ERR("timeout waiting for previous ramrod completion\n");
6261                         return rc;
6262                 }
6263         } else if (rc) {
6264                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6265                 return rc;
6266         }
6267
6268         /* Set "pending" bit */
6269         ECORE_SET_BIT(cmd, pending);
6270
6271         /* Don't send a command if only driver cleanup was requested */
6272         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6273                 ecore_func_state_change_comp(sc, o, cmd);
6274                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6275         } else {
6276                 /* Send a ramrod */
6277                 rc = o->send_cmd(sc, params);
6278
6279                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6280
6281                 if (rc) {
6282                         o->next_state = ECORE_F_STATE_MAX;
6283                         ECORE_CLEAR_BIT(cmd, pending);
6284                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
6285                         return rc;
6286                 }
6287
6288                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6289                         rc = o->wait_comp(sc, o, cmd);
6290                         if (rc)
6291                                 return rc;
6292
6293                         return ECORE_SUCCESS;
6294                 }
6295         }
6296
6297         return ECORE_RET_PENDING(cmd, pending);
6298 }
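
/* Usage note: RAMROD_RETRY makes the call above poll for up to ~3 seconds
 * (300 polls of 10 ms) instead of failing with ECORE_BUSY while a previous
 * command is pending, and RAMROD_COMP_WAIT makes it fully synchronous:
 *
 *      ECORE_SET_BIT(RAMROD_RETRY, &fparams.ramrod_flags);
 *      ECORE_SET_BIT(RAMROD_COMP_WAIT, &fparams.ramrod_flags);
 *      rc = ecore_func_state_change(sc, &fparams);
 */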