2 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include "ecore_init.h"
33 /**** Exe Queue interfaces ****/
36 * ecore_exe_queue_init - init the Exe Queue object
38 * @o: pointer to the object
40 * @owner: pointer to the owner
41 * @validate: validate function pointer
42 * @optimize: optimize function pointer
43 * @exec: execute function pointer
44 * @get: get function pointer
46 static inline void ecore_exe_queue_init(struct bxe_softc *sc,
47 struct ecore_exe_queue_obj *o,
49 union ecore_qable_obj *owner,
50 exe_q_validate validate,
52 exe_q_optimize optimize,
56 ECORE_MEMSET(o, 0, sizeof(*o));
58 ECORE_LIST_INIT(&o->exe_queue);
59 ECORE_LIST_INIT(&o->pending_comp);
61 ECORE_SPIN_LOCK_INIT(&o->lock, sc);
63 o->exe_chunk_len = exe_len;
66 /* Owner specific callbacks */
67 o->validate = validate;
69 o->optimize = optimize;
73 ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d\n",
77 static inline void ecore_exe_queue_free_elem(struct bxe_softc *sc,
78 struct ecore_exeq_elem *elem)
80 ECORE_MSG(sc, "Deleting an exe_queue element\n");
81 ECORE_FREE(sc, elem, sizeof(*elem));
84 static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
86 struct ecore_exeq_elem *elem;
89 ECORE_SPIN_LOCK_BH(&o->lock);
91 ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
92 struct ecore_exeq_elem)
95 ECORE_SPIN_UNLOCK_BH(&o->lock);
101 * ecore_exe_queue_add - add a new element to the execution queue
105 * @cmd: new command to add
106 * @restore: true - do not optimize the command
108 * If the element is optimized or is illegal, frees it.
110 static inline int ecore_exe_queue_add(struct bxe_softc *sc,
111 struct ecore_exe_queue_obj *o,
112 struct ecore_exeq_elem *elem,
117 ECORE_SPIN_LOCK_BH(&o->lock);
120 /* Try to cancel this element against the commands already in the queue */
121 rc = o->optimize(sc, o->owner, elem);
125 /* Check if this request is ok */
126 rc = o->validate(sc, o->owner, elem);
128 ECORE_MSG(sc, "Preamble failed: %d\n", rc);
133 /* If so, add it to the execution queue */
134 ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);
136 ECORE_SPIN_UNLOCK_BH(&o->lock);
138 return ECORE_SUCCESS;
141 ecore_exe_queue_free_elem(sc, elem);
143 ECORE_SPIN_UNLOCK_BH(&o->lock);
148 static inline void __ecore_exe_queue_reset_pending(
149 struct bxe_softc *sc,
150 struct ecore_exe_queue_obj *o)
152 struct ecore_exeq_elem *elem;
154 while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
155 elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
156 struct ecore_exeq_elem,
159 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
160 ecore_exe_queue_free_elem(sc, elem);
165 * ecore_exe_queue_step - execute one execution chunk atomically
169 * @ramrod_flags: flags
171 * (Should be called while holding the exe_queue->lock).
173 static inline int ecore_exe_queue_step(struct bxe_softc *sc,
174 struct ecore_exe_queue_obj *o,
175 unsigned long *ramrod_flags)
177 struct ecore_exeq_elem *elem, spacer;
180 ECORE_MEMSET(&spacer, 0, sizeof(spacer));
182 /* Next step should not be performed until the current is finished,
183 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
184 * properly clear object internals without sending any command to the FW
185 * which also implies there won't be any completion to clear the pending list.
188 if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
189 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
190 ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
191 __ecore_exe_queue_reset_pending(sc, o);
193 return ECORE_PENDING;
197 /* Run through the pending commands list and create a next execution chunk.
200 while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
201 elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
202 struct ecore_exeq_elem,
204 ECORE_DBG_BREAK_IF(!elem->cmd_len);
206 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
207 cur_len += elem->cmd_len;
208 /* Prevent both lists from being empty when moving an
209 * element. This will allow the call of
210 * ecore_exe_queue_empty() without locking.
212 ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
214 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
215 ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
216 ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
223 return ECORE_SUCCESS;
225 rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
227 /* In case of an error return the commands back to the queue
228 * and reset the pending_comp.
230 ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
232 /* If zero is returned, it means there are no outstanding pending
233 * completions and we may dismiss the pending list.
235 __ecore_exe_queue_reset_pending(sc, o);
240 static inline bool ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
242 bool empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);
244 /* Don't reorder!!! */
247 return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
250 static inline struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(
251 struct bxe_softc *sc)
253 ECORE_MSG(sc, "Allocating a new exe_queue element\n");
254 return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC,
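/* Illustrative sketch of the exe queue flow above (not a verbatim copy of any
 * caller in this file; the element setup is assumed to be done by the caller):
 * an element is allocated, queued, and the queue is later stepped in chunks of
 * up to exe_chunk_len commands while holding the queue lock.
 *
 *	struct ecore_exeq_elem *elem = ecore_exe_queue_alloc_elem(sc);
 *
 *	if (elem) {
 *		elem->cmd_len = 1;
 *		(fill elem->cmd_data for the requested command)
 *		rc = ecore_exe_queue_add(sc, &o->exe_queue, elem, FALSE);
 *	}
 *	(later, with o->exe_queue.lock held)
 *	rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
 */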
258 /************************ raw_obj functions ***********************************/
259 static bool ecore_raw_check_pending(struct ecore_raw_obj *o)
262 * !! converts the value returned by ECORE_TEST_BIT such that it
263 * is guaranteed not to be truncated regardless of bool definition.
265 * Note we cannot simply define the function's return value type
266 * to match the type returned by ECORE_TEST_BIT, as it varies by
267 * platform/implementation.
270 return !!ECORE_TEST_BIT(o->state, o->pstate);
273 static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
275 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
276 ECORE_CLEAR_BIT(o->state, o->pstate);
277 ECORE_SMP_MB_AFTER_CLEAR_BIT();
280 static void ecore_raw_set_pending(struct ecore_raw_obj *o)
282 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
283 ECORE_SET_BIT(o->state, o->pstate);
284 ECORE_SMP_MB_AFTER_CLEAR_BIT();
288 * ecore_state_wait - wait until the given bit(state) is cleared
291 * @state: state which is to be cleared
292 * @state_p: state buffer
295 static inline int ecore_state_wait(struct bxe_softc *sc, int state,
296 unsigned long *pstate)
298 /* can take a while if any port is running */
302 if (CHIP_REV_IS_EMUL(sc))
305 ECORE_MSG(sc, "waiting for state to become %d\n", state);
309 if (!ECORE_TEST_BIT(state, pstate)) {
310 #ifdef ECORE_STOP_ON_ERROR
311 ECORE_MSG(sc, "exit (cnt %d)\n", 5000 - cnt);
313 return ECORE_SUCCESS;
316 ECORE_WAIT(sc, delay_us);
323 ECORE_ERR("timeout waiting for state %d\n", state);
324 #ifdef ECORE_STOP_ON_ERROR
328 return ECORE_TIMEOUT;
331 static int ecore_raw_wait(struct bxe_softc *sc, struct ecore_raw_obj *raw)
333 return ecore_state_wait(sc, raw->state, raw->pstate);
336 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
337 /* credit handling callbacks */
338 static bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
340 struct ecore_credit_pool_obj *mp = o->macs_pool;
342 ECORE_DBG_BREAK_IF(!mp);
344 return mp->get_entry(mp, offset);
347 static bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
349 struct ecore_credit_pool_obj *mp = o->macs_pool;
351 ECORE_DBG_BREAK_IF(!mp);
353 return mp->get(mp, 1);
356 static bool ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset)
358 struct ecore_credit_pool_obj *vp = o->vlans_pool;
360 ECORE_DBG_BREAK_IF(!vp);
362 return vp->get_entry(vp, offset);
365 static bool ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o)
367 struct ecore_credit_pool_obj *vp = o->vlans_pool;
369 ECORE_DBG_BREAK_IF(!vp);
371 return vp->get(vp, 1);
374 static bool ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
376 struct ecore_credit_pool_obj *mp = o->macs_pool;
377 struct ecore_credit_pool_obj *vp = o->vlans_pool;
382 if (!vp->get(vp, 1)) {
390 static bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
392 struct ecore_credit_pool_obj *mp = o->macs_pool;
394 return mp->put_entry(mp, offset);
397 static bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
399 struct ecore_credit_pool_obj *mp = o->macs_pool;
401 return mp->put(mp, 1);
404 static bool ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset)
406 struct ecore_credit_pool_obj *vp = o->vlans_pool;
408 return vp->put_entry(vp, offset);
411 static bool ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o)
413 struct ecore_credit_pool_obj *vp = o->vlans_pool;
415 return vp->put(vp, 1);
418 static bool ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
420 struct ecore_credit_pool_obj *mp = o->macs_pool;
421 struct ecore_credit_pool_obj *vp = o->vlans_pool;
426 if (!vp->put(vp, 1)) {
435 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on the vlan mac head list
439 * @o: vlan_mac object
441 * @details: Non-blocking implementation; should be called under the execution queue lock.
444 static int __ecore_vlan_mac_h_write_trylock(struct bxe_softc *sc,
445 struct ecore_vlan_mac_obj *o)
447 if (o->head_reader) {
448 ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy\n");
452 ECORE_MSG(sc, "vlan_mac_lock writer - Taken\n");
453 return ECORE_SUCCESS;
457 * __ecore_vlan_mac_h_exec_pending - execute a previously pended step
458 * which wasn't able to run due to a taken lock on the vlan mac head list.
461 * @o: vlan_mac object
463 * @details Should be called under execution queue lock; notice it might release
464 * and reclaim it during its run.
466 static void __ecore_vlan_mac_h_exec_pending(struct bxe_softc *sc,
467 struct ecore_vlan_mac_obj *o)
470 unsigned long ramrod_flags = o->saved_ramrod_flags;
472 ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
474 o->head_exe_request = FALSE;
475 o->saved_ramrod_flags = 0;
476 rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
477 if ((rc != ECORE_SUCCESS) && (rc != ECORE_PENDING)) {
478 ECORE_ERR("execution of pending commands failed with rc %d\n",
480 #ifdef ECORE_STOP_ON_ERROR
487 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
488 * called due to vlan mac head list lock being taken.
491 * @o: vlan_mac object
492 * @ramrod_flags: ramrod flags of missed execution
494 * @details Should be called under execution queue lock.
496 static void __ecore_vlan_mac_h_pend(struct bxe_softc *sc,
497 struct ecore_vlan_mac_obj *o,
498 unsigned long ramrod_flags)
500 o->head_exe_request = TRUE;
501 o->saved_ramrod_flags = ramrod_flags;
502 ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu\n",
507 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
510 * @o: vlan_mac object
512 * @details Should be called under execution queue lock. Notice if a pending
513 * execution exists, it would perform it - possibly releasing and
514 * reclaiming the execution queue lock.
516 static void __ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
517 struct ecore_vlan_mac_obj *o)
519 /* It's possible a new pending execution was added since this writer
520 * executed. If so, execute again. [Ad infinitum]
522 while(o->head_exe_request) {
523 ECORE_MSG(sc, "vlan_mac_lock - writer release encountered a pending request\n");
524 __ecore_vlan_mac_h_exec_pending(sc, o);
529 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
532 * @o: vlan_mac object
534 * @details Notice if a pending execution exists, it would perform it -
535 * possibly releasing and reclaiming the execution queue lock.
537 void ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
538 struct ecore_vlan_mac_obj *o)
540 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
541 __ecore_vlan_mac_h_write_unlock(sc, o);
542 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
546 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
549 * @o: vlan_mac object
551 * @details Should be called under the execution queue lock. May sleep. May
552 * release and reclaim execution queue lock during its run.
554 static int __ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
555 struct ecore_vlan_mac_obj *o)
557 /* If we got here, we're holding lock --> no WRITER exists */
559 ECORE_MSG(sc, "vlan_mac_lock - locked reader - number %d\n",
562 return ECORE_SUCCESS;
566 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
569 * @o: vlan_mac object
571 * @details May sleep. Claims and releases execution queue lock during its run.
573 int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
574 struct ecore_vlan_mac_obj *o)
578 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
579 rc = __ecore_vlan_mac_h_read_lock(sc, o);
580 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
586 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
589 * @o: vlan_mac object
591 * @details Should be called under execution queue lock. Notice if a pending
592 * execution exists, it would be performed if this was the last
593 * reader. possibly releasing and reclaiming the execution queue lock.
595 static void __ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
596 struct ecore_vlan_mac_obj *o)
598 if (!o->head_reader) {
599 ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
600 #ifdef ECORE_STOP_ON_ERROR
605 ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d\n",
609 /* It's possible a new pending execution was added, and that this reader
610 * was last - if so we need to execute the command.
612 if (!o->head_reader && o->head_exe_request) {
613 ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request\n");
615 /* Writer release will do the trick */
616 __ecore_vlan_mac_h_write_unlock(sc, o);
621 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
624 * @o: vlan_mac object
626 * @details Notice if a pending execution exists, it would be performed if this
627 * was the last reader. Claims and releases the execution queue lock during its run.
630 void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
631 struct ecore_vlan_mac_obj *o)
633 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
634 __ecore_vlan_mac_h_read_unlock(sc, o);
635 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
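/* Typical reader-side pattern (sketch only; ecore_get_n_elements() below
 * follows the same shape): take the reader lock, walk o->head, then release
 * it. The reader lock only guarantees that no ramrod reconfigures the head
 * list while it is being traversed.
 *
 *	if (ecore_vlan_mac_h_read_lock(sc, o) == ECORE_SUCCESS) {
 *		ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
 *					  struct ecore_vlan_mac_registry_elem)
 *			(inspect pos->u and pos->vlan_mac_flags here)
 *		ecore_vlan_mac_h_read_unlock(sc, o);
 *	}
 */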
639 * ecore_get_n_elements - get n elements from the vlan_mac registry
642 * @o: vlan_mac object
643 * @n: number of elements to get
644 * @base: base address for element placement
645 * @stride: stride between elements (in bytes)
647 static int ecore_get_n_elements(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
648 int n, uint8_t *base, uint8_t stride, uint8_t size)
650 struct ecore_vlan_mac_registry_elem *pos;
651 uint8_t *next = base;
655 ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)\n");
656 read_lock = ecore_vlan_mac_h_read_lock(sc, o);
657 if (read_lock != ECORE_SUCCESS)
658 ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
661 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
662 struct ecore_vlan_mac_registry_elem) {
664 ECORE_MEMCPY(next, &pos->u, size);
666 ECORE_MSG(sc, "copied element number %d to address %p element was:\n",
668 next += stride + size;
672 if (read_lock == ECORE_SUCCESS) {
673 ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)\n");
674 ecore_vlan_mac_h_read_unlock(sc, o);
677 return counter * ETH_ALEN;
680 /* check_add() callbacks */
681 static int ecore_check_mac_add(struct bxe_softc *sc,
682 struct ecore_vlan_mac_obj *o,
683 union ecore_classification_ramrod_data *data)
685 struct ecore_vlan_mac_registry_elem *pos;
687 ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
689 if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
692 /* Check if a requested MAC already exists */
693 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
694 struct ecore_vlan_mac_registry_elem)
695 if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
696 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
699 return ECORE_SUCCESS;
702 static int ecore_check_vlan_add(struct bxe_softc *sc,
703 struct ecore_vlan_mac_obj *o,
704 union ecore_classification_ramrod_data *data)
706 struct ecore_vlan_mac_registry_elem *pos;
708 ECORE_MSG(sc, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
710 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
711 struct ecore_vlan_mac_registry_elem)
712 if (data->vlan.vlan == pos->u.vlan.vlan)
715 return ECORE_SUCCESS;
718 static int ecore_check_vlan_mac_add(struct bxe_softc *sc,
719 struct ecore_vlan_mac_obj *o,
720 union ecore_classification_ramrod_data *data)
722 struct ecore_vlan_mac_registry_elem *pos;
724 ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
725 data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
727 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
728 struct ecore_vlan_mac_registry_elem)
729 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
730 (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
732 (data->vlan_mac.is_inner_mac ==
733 pos->u.vlan_mac.is_inner_mac))
736 return ECORE_SUCCESS;
739 /* check_del() callbacks */
740 static struct ecore_vlan_mac_registry_elem *
741 ecore_check_mac_del(struct bxe_softc *sc,
742 struct ecore_vlan_mac_obj *o,
743 union ecore_classification_ramrod_data *data)
745 struct ecore_vlan_mac_registry_elem *pos;
747 ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
749 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
750 struct ecore_vlan_mac_registry_elem)
751 if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
752 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
758 static struct ecore_vlan_mac_registry_elem *
759 ecore_check_vlan_del(struct bxe_softc *sc,
760 struct ecore_vlan_mac_obj *o,
761 union ecore_classification_ramrod_data *data)
763 struct ecore_vlan_mac_registry_elem *pos;
765 ECORE_MSG(sc, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
767 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
768 struct ecore_vlan_mac_registry_elem)
769 if (data->vlan.vlan == pos->u.vlan.vlan)
775 static struct ecore_vlan_mac_registry_elem *
776 ecore_check_vlan_mac_del(struct bxe_softc *sc,
777 struct ecore_vlan_mac_obj *o,
778 union ecore_classification_ramrod_data *data)
780 struct ecore_vlan_mac_registry_elem *pos;
782 ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
783 data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
785 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
786 struct ecore_vlan_mac_registry_elem)
787 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
788 (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
790 (data->vlan_mac.is_inner_mac ==
791 pos->u.vlan_mac.is_inner_mac))
797 /* check_move() callback */
798 static bool ecore_check_move(struct bxe_softc *sc,
799 struct ecore_vlan_mac_obj *src_o,
800 struct ecore_vlan_mac_obj *dst_o,
801 union ecore_classification_ramrod_data *data)
803 struct ecore_vlan_mac_registry_elem *pos;
806 /* Check if we can delete the requested configuration from the first
809 pos = src_o->check_del(sc, src_o, data);
811 /* check if configuration can be added */
812 rc = dst_o->check_add(sc, dst_o, data);
814 /* If this classification can not be added (is already set)
815 * or can't be deleted - return an error.
823 static bool ecore_check_move_always_err(
824 struct bxe_softc *sc,
825 struct ecore_vlan_mac_obj *src_o,
826 struct ecore_vlan_mac_obj *dst_o,
827 union ecore_classification_ramrod_data *data)
832 static inline uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
834 struct ecore_raw_obj *raw = &o->raw;
835 uint8_t rx_tx_flag = 0;
837 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
838 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
839 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
841 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
842 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
843 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
848 void ecore_set_mac_in_nig(struct bxe_softc *sc,
849 bool add, unsigned char *dev_addr, int index)
852 uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
853 NIG_REG_LLH0_FUNC_MEM;
855 if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
858 if (index > ECORE_LLH_CAM_MAX_PF_LINE)
861 ECORE_MSG(sc, "Going to %s LLH configuration at entry %d\n",
862 (add ? "ADD" : "DELETE"), index);
865 /* LLH_FUNC_MEM is a uint64_t WB register */
866 reg_offset += 8*index;
868 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
869 (dev_addr[4] << 8) | dev_addr[5]);
870 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
872 ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
875 REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
876 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
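/* For example, with dev_addr = 00:11:22:33:44:55 the pair written above is
 * wb_data[0] = 0x22334455 and wb_data[1] = 0x00000011, i.e. wb_data[0] carries
 * the four least significant MAC bytes and wb_data[1] the two most significant
 * ones of the 64-bit LLH entry.
 */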
880 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
883 * @o: queue for which we want to configure this rule
884 * @add: if TRUE the command is an ADD command, DEL otherwise
885 * @opcode: CLASSIFY_RULE_OPCODE_XXX
886 * @hdr: pointer to a header to setup
889 static inline void ecore_vlan_mac_set_cmd_hdr_e2(struct bxe_softc *sc,
890 struct ecore_vlan_mac_obj *o, bool add, int opcode,
891 struct eth_classify_cmd_header *hdr)
893 struct ecore_raw_obj *raw = &o->raw;
895 hdr->client_id = raw->cl_id;
896 hdr->func_id = raw->func_id;
898 /* Rx and/or Tx (internal switching) configuration ? */
899 hdr->cmd_general_data |=
900 ecore_vlan_mac_get_rx_tx_flag(o);
903 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
905 hdr->cmd_general_data |=
906 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
910 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
912 * @cid: connection id
913 * @type: ECORE_FILTER_XXX_PENDING
914 * @hdr: pointer to header to setup
917 * currently we always configure one rule and echo field to contain a CID and an opcode type.
920 static inline void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
921 struct eth_classify_header *hdr, int rule_cnt)
923 hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
924 (type << ECORE_SWCID_SHIFT));
925 hdr->rule_cnt = (uint8_t)rule_cnt;
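/* The echo field thus carries both the SW CID (low bits, under
 * ECORE_SWCID_MASK) and the pending-state type (shifted by ECORE_SWCID_SHIFT);
 * the completion path can later decode it to recover the connection and the
 * command type a CQE belongs to.
 */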
928 /* hw_config() callbacks */
929 static void ecore_set_one_mac_e2(struct bxe_softc *sc,
930 struct ecore_vlan_mac_obj *o,
931 struct ecore_exeq_elem *elem, int rule_idx,
934 struct ecore_raw_obj *raw = &o->raw;
935 struct eth_classify_rules_ramrod_data *data =
936 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
937 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
938 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
939 bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
940 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
941 uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;
943 /* Set LLH CAM entry: currently only iSCSI and ETH macs are
944 * relevant. In addition, the current implementation is tuned for a single ETH MAC.
947 * When multiple unicast ETH MACs PF configuration in switch
948 * independent mode is required (NetQ, multiple netdev MACs,
949 * etc.), consider better utilisation of 8 per function MAC
950 * entries in the LLH register. There are also
951 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the
952 * total number of CAM entries to 16.
954 * Currently we won't configure NIG for MACs other than a primary ETH
955 * MAC and iSCSI L2 MAC.
957 * If this MAC is moving from one Queue to another, no need to change the NIG entry.
960 if (cmd != ECORE_VLAN_MAC_MOVE) {
961 if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
962 ecore_set_mac_in_nig(sc, add, mac,
963 ECORE_LLH_CAM_ISCSI_ETH_LINE);
964 else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
965 ecore_set_mac_in_nig(sc, add, mac,
966 ECORE_LLH_CAM_ETH_LINE);
969 /* Reset the ramrod data buffer for the first rule */
971 ECORE_MEMSET(data, 0, sizeof(*data));
973 /* Setup a command header */
974 ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_MAC,
975 &rule_entry->mac.header);
977 ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n",
978 (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id);
980 /* Set a MAC itself */
981 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
982 &rule_entry->mac.mac_mid,
983 &rule_entry->mac.mac_lsb, mac);
984 rule_entry->mac.inner_mac =
985 elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
987 /* MOVE: Add a rule that will add this MAC to the target Queue */
988 if (cmd == ECORE_VLAN_MAC_MOVE) {
992 /* Setup ramrod data */
993 ecore_vlan_mac_set_cmd_hdr_e2(sc,
994 elem->cmd_data.vlan_mac.target_obj,
995 TRUE, CLASSIFY_RULE_OPCODE_MAC,
996 &rule_entry->mac.header);
998 /* Set a MAC itself */
999 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
1000 &rule_entry->mac.mac_mid,
1001 &rule_entry->mac.mac_lsb, mac);
1002 rule_entry->mac.inner_mac =
1003 elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
1006 /* Set the ramrod data header */
1007 /* TODO: take this to the higher level in order to prevent multiple writing
1009 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1014 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
1016 * @sc: device handle
1019 * @cam_offset: offset in cam memory
1020 * @hdr: pointer to a header to setup
1024 static inline void ecore_vlan_mac_set_rdata_hdr_e1x(struct bxe_softc *sc,
1025 struct ecore_vlan_mac_obj *o, int type, int cam_offset,
1026 struct mac_configuration_hdr *hdr)
1028 struct ecore_raw_obj *r = &o->raw;
1031 hdr->offset = (uint8_t)cam_offset;
1032 hdr->client_id = ECORE_CPU_TO_LE16(0xff);
1033 hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
1034 (type << ECORE_SWCID_SHIFT));
1037 static inline void ecore_vlan_mac_set_cfg_entry_e1x(struct bxe_softc *sc,
1038 struct ecore_vlan_mac_obj *o, bool add, int opcode, uint8_t *mac,
1039 uint16_t vlan_id, struct mac_configuration_entry *cfg_entry)
1041 struct ecore_raw_obj *r = &o->raw;
1042 uint32_t cl_bit_vec = (1 << r->cl_id);
1044 cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
1045 cfg_entry->pf_id = r->func_id;
1046 cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);
1049 ECORE_SET_FLAG(cfg_entry->flags,
1050 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1051 T_ETH_MAC_COMMAND_SET);
1052 ECORE_SET_FLAG(cfg_entry->flags,
1053 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
1056 /* Set a MAC in a ramrod data */
1057 ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
1058 &cfg_entry->middle_mac_addr,
1059 &cfg_entry->lsb_mac_addr, mac);
1061 ECORE_SET_FLAG(cfg_entry->flags,
1062 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1063 T_ETH_MAC_COMMAND_INVALIDATE);
1066 static inline void ecore_vlan_mac_set_rdata_e1x(struct bxe_softc *sc,
1067 struct ecore_vlan_mac_obj *o, int type, int cam_offset, bool add,
1068 uint8_t *mac, uint16_t vlan_id, int opcode, struct mac_configuration_cmd *config)
1070 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
1071 struct ecore_raw_obj *raw = &o->raw;
1073 ecore_vlan_mac_set_rdata_hdr_e1x(sc, o, type, cam_offset,
1075 ecore_vlan_mac_set_cfg_entry_e1x(sc, o, add, opcode, mac, vlan_id,
1078 ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n",
1079 (add ? "setting" : "clearing"),
1080 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id, cam_offset);
1084 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
1086 * @sc: device handle
1087 * @o: ecore_vlan_mac_obj
1088 * @elem: ecore_exeq_elem
1089 * @rule_idx: rule_idx
1090 * @cam_offset: cam_offset
1092 static void ecore_set_one_mac_e1x(struct bxe_softc *sc,
1093 struct ecore_vlan_mac_obj *o,
1094 struct ecore_exeq_elem *elem, int rule_idx,
1097 struct ecore_raw_obj *raw = &o->raw;
1098 struct mac_configuration_cmd *config =
1099 (struct mac_configuration_cmd *)(raw->rdata);
1100 /* 57710 and 57711 do not support MOVE command,
1101 * so it's either ADD or DEL
1103 bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1106 /* Reset the ramrod data buffer */
1107 ECORE_MEMSET(config, 0, sizeof(*config));
1109 ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
1111 elem->cmd_data.vlan_mac.u.mac.mac, 0,
1112 ETH_VLAN_FILTER_ANY_VLAN, config);
1115 static void ecore_set_one_vlan_e2(struct bxe_softc *sc,
1116 struct ecore_vlan_mac_obj *o,
1117 struct ecore_exeq_elem *elem, int rule_idx,
1120 struct ecore_raw_obj *raw = &o->raw;
1121 struct eth_classify_rules_ramrod_data *data =
1122 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
1123 int rule_cnt = rule_idx + 1;
1124 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1125 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1126 bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1127 uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
1129 /* Reset the ramrod data buffer for the first rule */
1131 ECORE_MEMSET(data, 0, sizeof(*data));
1133 /* Set a rule header */
1134 ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1135 &rule_entry->vlan.header);
1137 ECORE_MSG(sc, "About to %s VLAN %d\n", (add ? "add" : "delete"),
1140 /* Set a VLAN itself */
1141 rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
1143 /* MOVE: Add a rule that will add this VLAN to the target Queue */
1144 if (cmd == ECORE_VLAN_MAC_MOVE) {
1148 /* Setup ramrod data */
1149 ecore_vlan_mac_set_cmd_hdr_e2(sc,
1150 elem->cmd_data.vlan_mac.target_obj,
1151 TRUE, CLASSIFY_RULE_OPCODE_VLAN,
1152 &rule_entry->vlan.header);
1154 /* Set a VLAN itself */
1155 rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
1158 /* Set the ramrod data header */
1159 /* TODO: take this to the higher level in order to prevent multiple writing
1161 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1165 static void ecore_set_one_vlan_mac_e2(struct bxe_softc *sc,
1166 struct ecore_vlan_mac_obj *o,
1167 struct ecore_exeq_elem *elem,
1168 int rule_idx, int cam_offset)
1170 struct ecore_raw_obj *raw = &o->raw;
1171 struct eth_classify_rules_ramrod_data *data =
1172 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
1173 int rule_cnt = rule_idx + 1;
1174 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1175 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1176 bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1177 uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
1178 uint8_t *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
1180 /* Reset the ramrod data buffer for the first rule */
1182 ECORE_MEMSET(data, 0, sizeof(*data));
1184 /* Set a rule header */
1185 ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_PAIR,
1186 &rule_entry->pair.header);
1188 /* Set VLAN and MAC themselves */
1189 rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
1190 ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1191 &rule_entry->pair.mac_mid,
1192 &rule_entry->pair.mac_lsb, mac);
1193 rule_entry->pair.inner_mac =
1194 elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1195 /* MOVE: Add a rule that will add this VLAN-MAC pair to the target Queue */
1196 if (cmd == ECORE_VLAN_MAC_MOVE) {
1200 /* Setup ramrod data */
1201 ecore_vlan_mac_set_cmd_hdr_e2(sc,
1202 elem->cmd_data.vlan_mac.target_obj,
1203 TRUE, CLASSIFY_RULE_OPCODE_PAIR,
1204 &rule_entry->pair.header);
1206 /* Set VLAN and MAC themselves */
1207 rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
1208 ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1209 &rule_entry->pair.mac_mid,
1210 &rule_entry->pair.mac_lsb, mac);
1211 rule_entry->pair.inner_mac =
1212 elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1215 /* Set the ramrod data header */
1216 /* TODO: take this to the higher level in order to prevent multiple writing
1218 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1223 * ecore_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
1225 * @sc: device handle
1226 * @o: ecore_vlan_mac_obj
1227 * @elem: ecore_exeq_elem
1228 * @rule_idx: rule_idx
1229 * @cam_offset: cam_offset
1231 static void ecore_set_one_vlan_mac_e1h(struct bxe_softc *sc,
1232 struct ecore_vlan_mac_obj *o,
1233 struct ecore_exeq_elem *elem,
1234 int rule_idx, int cam_offset)
1236 struct ecore_raw_obj *raw = &o->raw;
1237 struct mac_configuration_cmd *config =
1238 (struct mac_configuration_cmd *)(raw->rdata);
1239 /* 57710 and 57711 do not support MOVE command,
1240 * so it's either ADD or DEL
1242 bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1245 /* Reset the ramrod data buffer */
1246 ECORE_MEMSET(config, 0, sizeof(*config));
1248 ecore_vlan_mac_set_rdata_e1x(sc, o, ECORE_FILTER_VLAN_MAC_PENDING,
1250 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1251 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1252 ETH_VLAN_FILTER_CLASSIFY, config);
1255 #define list_next_entry(pos, member) \
1256 list_entry((pos)->member.next, typeof(*(pos)), member)
1259 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1261 * @sc: device handle
1262 * @p: command parameters
1263 * @ppos: pointer to the cookie
1265 * reconfigure next MAC/VLAN/VLAN-MAC element from the
1266 * previously configured elements list.
1268 * from the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is taken into account
1271 * pointer to the cookie - it should be given back in the next call to make the
1272 * function handle the next element. If *ppos is set to NULL it will restart the
1273 * iterator. If the returned *ppos == NULL this means that the last element has been handled.
1277 static int ecore_vlan_mac_restore(struct bxe_softc *sc,
1278 struct ecore_vlan_mac_ramrod_params *p,
1279 struct ecore_vlan_mac_registry_elem **ppos)
1281 struct ecore_vlan_mac_registry_elem *pos;
1282 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1284 /* If list is empty - there is nothing to do here */
1285 if (ECORE_LIST_IS_EMPTY(&o->head)) {
1290 /* make a step... */
1292 *ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
1293 struct ecore_vlan_mac_registry_elem,
1296 *ppos = ECORE_LIST_NEXT(*ppos, link,
1297 struct ecore_vlan_mac_registry_elem);
1301 /* If it's the last step - return NULL */
1302 if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
1305 /* Prepare a 'user_req' */
1306 ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));
1308 /* Set the command */
1309 p->user_req.cmd = ECORE_VLAN_MAC_ADD;
1311 /* Set vlan_mac_flags */
1312 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1314 /* Set a restore bit */
1315 ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);
1317 return ecore_config_vlan_mac(sc, p);
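/* A caller would typically drive the iterator like this (sketch only; it
 * assumes RAMROD_COMP_WAIT is set in p.ramrod_flags so that every call
 * completes before returning, and that p is otherwise prepared as for
 * ecore_config_vlan_mac()):
 *
 *	struct ecore_vlan_mac_registry_elem *pos = NULL;
 *
 *	do {
 *		rc = ecore_vlan_mac_restore(sc, &p, &pos);
 *	} while (rc == ECORE_SUCCESS && pos != NULL);
 */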
1320 /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1321 * pointer to an element with a specific criteria and NULL if such an element
1322 * hasn't been found.
1324 static struct ecore_exeq_elem *ecore_exeq_get_mac(
1325 struct ecore_exe_queue_obj *o,
1326 struct ecore_exeq_elem *elem)
1328 struct ecore_exeq_elem *pos;
1329 struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1331 /* Check pending for execution commands */
1332 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1333 struct ecore_exeq_elem)
1334 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
1336 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1342 static struct ecore_exeq_elem *ecore_exeq_get_vlan(
1343 struct ecore_exe_queue_obj *o,
1344 struct ecore_exeq_elem *elem)
1346 struct ecore_exeq_elem *pos;
1347 struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1349 /* Check pending for execution commands */
1350 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1351 struct ecore_exeq_elem)
1352 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan, data,
1354 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1360 static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
1361 struct ecore_exe_queue_obj *o,
1362 struct ecore_exeq_elem *elem)
1364 struct ecore_exeq_elem *pos;
1365 struct ecore_vlan_mac_ramrod_data *data =
1366 &elem->cmd_data.vlan_mac.u.vlan_mac;
1368 /* Check pending for execution commands */
1369 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1370 struct ecore_exeq_elem)
1371 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1373 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1380 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
1382 * @sc: device handle
1383 * @qo: ecore_qable_obj
1384 * @elem: ecore_exeq_elem
1386 * Checks that the requested configuration can be added. If yes and if
1387 * requested, consume CAM credit.
1389 * The 'validate' is run after the 'optimize'.
1392 static inline int ecore_validate_vlan_mac_add(struct bxe_softc *sc,
1393 union ecore_qable_obj *qo,
1394 struct ecore_exeq_elem *elem)
1396 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1397 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1400 /* Check the registry */
1401 rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
1403 ECORE_MSG(sc, "ADD command is not allowed considering current registry state.\n");
1407 /* Check if there is a pending ADD command for this
1408 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1410 if (exeq->get(exeq, elem)) {
1411 ECORE_MSG(sc, "There is a pending ADD command already\n");
1412 return ECORE_EXISTS;
1415 /* TODO: Check the pending MOVE from other objects where this
1416 * object is a destination object.
1419 /* Consume the credit if not requested not to */
1420 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1421 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1425 return ECORE_SUCCESS;
1429 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
1431 * @sc: device handle
1432 * @qo: quable object to check
1433 * @elem: element that needs to be deleted
1435 * Checks that the requested configuration can be deleted. If yes and if
1436 * requested, returns a CAM credit.
1438 * The 'validate' is run after the 'optimize'.
1440 static inline int ecore_validate_vlan_mac_del(struct bxe_softc *sc,
1441 union ecore_qable_obj *qo,
1442 struct ecore_exeq_elem *elem)
1444 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1445 struct ecore_vlan_mac_registry_elem *pos;
1446 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1447 struct ecore_exeq_elem query_elem;
1449 /* If this classification can not be deleted (doesn't exist)
1450 * - return ECORE_EXISTS.
1452 pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1454 ECORE_MSG(sc, "DEL command is not allowed considering current registry state\n");
1455 return ECORE_EXISTS;
1458 /* Check if there are pending DEL or MOVE commands for this
1459 * MAC/VLAN/VLAN-MAC. Return an error if so.
1461 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1463 /* Check for MOVE commands */
1464 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
1465 if (exeq->get(exeq, &query_elem)) {
1466 ECORE_ERR("There is a pending MOVE command already\n");
1470 /* Check for DEL commands */
1471 if (exeq->get(exeq, elem)) {
1472 ECORE_MSG(sc, "There is a pending DEL command already\n");
1473 return ECORE_EXISTS;
1476 /* Return the credit to the credit pool if not requested not to */
1477 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1478 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1479 o->put_credit(o))) {
1480 ECORE_ERR("Failed to return a credit\n");
1484 return ECORE_SUCCESS;
1488 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
1490 * @sc: device handle
1491 * @qo: quable object to check (source)
1492 * @elem: element that needs to be moved
1494 * Checks that the requested configuration can be moved. If yes and if
1495 * requested, returns a CAM credit.
1497 * The 'validate' is run after the 'optimize'.
1499 static inline int ecore_validate_vlan_mac_move(struct bxe_softc *sc,
1500 union ecore_qable_obj *qo,
1501 struct ecore_exeq_elem *elem)
1503 struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
1504 struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1505 struct ecore_exeq_elem query_elem;
1506 struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
1507 struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1509 /* Check if we can perform this operation based on the current registry
1512 if (!src_o->check_move(sc, src_o, dest_o,
1513 &elem->cmd_data.vlan_mac.u)) {
1514 ECORE_MSG(sc, "MOVE command is not allowed considering current registry state\n");
1518 /* Check if there is an already pending DEL or MOVE command for the
1519 * source object or ADD command for a destination object. Return an
1522 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1524 /* Check DEL on source */
1525 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1526 if (src_exeq->get(src_exeq, &query_elem)) {
1527 ECORE_ERR("There is a pending DEL command on the source queue already\n");
1531 /* Check MOVE on source */
1532 if (src_exeq->get(src_exeq, elem)) {
1533 ECORE_MSG(sc, "There is a pending MOVE command already\n");
1534 return ECORE_EXISTS;
1537 /* Check ADD on destination */
1538 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1539 if (dest_exeq->get(dest_exeq, &query_elem)) {
1540 ECORE_ERR("There is a pending ADD command on the destination queue already\n");
1544 /* Consume the credit if not requested not to */
1545 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
1546 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1547 dest_o->get_credit(dest_o)))
1550 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1551 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1552 src_o->put_credit(src_o))) {
1553 /* return the credit taken from dest... */
1554 dest_o->put_credit(dest_o);
1558 return ECORE_SUCCESS;
1561 static int ecore_validate_vlan_mac(struct bxe_softc *sc,
1562 union ecore_qable_obj *qo,
1563 struct ecore_exeq_elem *elem)
1565 switch (elem->cmd_data.vlan_mac.cmd) {
1566 case ECORE_VLAN_MAC_ADD:
1567 return ecore_validate_vlan_mac_add(sc, qo, elem);
1568 case ECORE_VLAN_MAC_DEL:
1569 return ecore_validate_vlan_mac_del(sc, qo, elem);
1570 case ECORE_VLAN_MAC_MOVE:
1571 return ecore_validate_vlan_mac_move(sc, qo, elem);
1577 static int ecore_remove_vlan_mac(struct bxe_softc *sc,
1578 union ecore_qable_obj *qo,
1579 struct ecore_exeq_elem *elem)
1583 /* If consumption wasn't required, nothing to do */
1584 if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1585 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1586 return ECORE_SUCCESS;
1588 switch (elem->cmd_data.vlan_mac.cmd) {
1589 case ECORE_VLAN_MAC_ADD:
1590 case ECORE_VLAN_MAC_MOVE:
1591 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1593 case ECORE_VLAN_MAC_DEL:
1594 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1603 return ECORE_SUCCESS;
1607 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1609 * @sc: device handle
1610 * @o: ecore_vlan_mac_obj
1613 static int ecore_wait_vlan_mac(struct bxe_softc *sc,
1614 struct ecore_vlan_mac_obj *o)
1617 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1618 struct ecore_raw_obj *raw = &o->raw;
1621 /* Wait for the current command to complete */
1622 rc = raw->wait_comp(sc, raw);
1626 /* Wait until there are no pending commands */
1627 if (!ecore_exe_queue_empty(exeq))
1628 ECORE_WAIT(sc, 1000);
1630 return ECORE_SUCCESS;
1633 return ECORE_TIMEOUT;
1636 static int __ecore_vlan_mac_execute_step(struct bxe_softc *sc,
1637 struct ecore_vlan_mac_obj *o,
1638 unsigned long *ramrod_flags)
1640 int rc = ECORE_SUCCESS;
1642 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1644 ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock\n");
1645 rc = __ecore_vlan_mac_h_write_trylock(sc, o);
1647 if (rc != ECORE_SUCCESS) {
1648 __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
1650 /** Calling function should not differentiate between this case
1651 * and the case in which there is already a pending ramrod
1655 rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
1657 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1663 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
1665 * @sc: device handle
1666 * @o: ecore_vlan_mac_obj
1668 * @cont: if TRUE schedule next execution chunk
1671 static int ecore_complete_vlan_mac(struct bxe_softc *sc,
1672 struct ecore_vlan_mac_obj *o,
1673 union event_ring_elem *cqe,
1674 unsigned long *ramrod_flags)
1676 struct ecore_raw_obj *r = &o->raw;
1679 /* Clearing the pending list & raw state should be made
1680 * atomically (as execution flow assumes they represent the same)
1682 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1684 /* Reset pending list */
1685 __ecore_exe_queue_reset_pending(sc, &o->exe_queue);
1688 r->clear_pending(r);
1690 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1692 /* If ramrod failed this is most likely a SW bug */
1693 if (cqe->message.error)
1696 /* Run the next bulk of pending commands if requested */
1697 if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
1698 rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
1703 /* If there is more work to do return PENDING */
1704 if (!ecore_exe_queue_empty(&o->exe_queue))
1705 return ECORE_PENDING;
1707 return ECORE_SUCCESS;
1711 * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1713 * @sc: device handle
1714 * @o: ecore_qable_obj
1715 * @elem: ecore_exeq_elem
1717 static int ecore_optimize_vlan_mac(struct bxe_softc *sc,
1718 union ecore_qable_obj *qo,
1719 struct ecore_exeq_elem *elem)
1721 struct ecore_exeq_elem query, *pos;
1722 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1723 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1725 ECORE_MEMCPY(&query, elem, sizeof(query));
1727 switch (elem->cmd_data.vlan_mac.cmd) {
1728 case ECORE_VLAN_MAC_ADD:
1729 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1731 case ECORE_VLAN_MAC_DEL:
1732 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1735 /* Don't handle anything other than ADD or DEL */
1739 /* If we found the appropriate element - delete it */
1740 pos = exeq->get(exeq, &query);
1743 /* Return the credit of the optimized command */
1744 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1745 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1746 if ((query.cmd_data.vlan_mac.cmd ==
1747 ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1748 ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
1750 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1751 ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
1756 ECORE_MSG(sc, "Optimizing %s command\n",
1757 (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1760 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1761 ecore_exe_queue_free_elem(sc, pos);
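/* Concrete effect of the optimization above: if an ADD for some MAC is still
 * sitting in the execution queue when a DEL for the same MAC is submitted,
 * the queued ADD is removed here and its CAM credit returned, and the new DEL
 * is freed by the caller as "optimized" - neither command reaches the FW.
 */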
1769 * ecore_vlan_mac_get_registry_elem - prepare a registry element
1771 * @sc: device handle
1777 * prepare a registry element according to the current command request.
1779 static inline int ecore_vlan_mac_get_registry_elem(
1780 struct bxe_softc *sc,
1781 struct ecore_vlan_mac_obj *o,
1782 struct ecore_exeq_elem *elem,
1784 struct ecore_vlan_mac_registry_elem **re)
1786 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1787 struct ecore_vlan_mac_registry_elem *reg_elem;
1789 /* Allocate a new registry element if needed. */
1791 ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1792 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1796 /* Get a new CAM offset */
1797 if (!o->get_cam_offset(o, ®_elem->cam_offset)) {
1798 /* This shall never happen, because we have checked the
1799 * CAM availability in the 'validate'.
1801 ECORE_DBG_BREAK_IF(1);
1802 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1806 ECORE_MSG(sc, "Got cam offset %d\n", reg_elem->cam_offset);
1808 /* Set a VLAN-MAC data */
1809 ECORE_MEMCPY(®_elem->u, &elem->cmd_data.vlan_mac.u,
1810 sizeof(reg_elem->u));
1812 /* Copy the flags (needed for DEL and RESTORE flows) */
1813 reg_elem->vlan_mac_flags =
1814 elem->cmd_data.vlan_mac.vlan_mac_flags;
1815 } else /* DEL, RESTORE */
1816 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1819 return ECORE_SUCCESS;
1823 * ecore_execute_vlan_mac - execute vlan mac command
1825 * @sc: device handle
1830 * go and send a ramrod!
1832 static int ecore_execute_vlan_mac(struct bxe_softc *sc,
1833 union ecore_qable_obj *qo,
1834 ecore_list_t *exe_chunk,
1835 unsigned long *ramrod_flags)
1837 struct ecore_exeq_elem *elem;
1838 struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1839 struct ecore_raw_obj *r = &o->raw;
1841 bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1842 bool drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1843 struct ecore_vlan_mac_registry_elem *reg_elem;
1844 enum ecore_vlan_mac_cmd cmd;
1846 /* If DRIVER_ONLY execution is requested, cleanup a registry
1847 * and exit. Otherwise send a ramrod to FW.
1850 ECORE_DBG_BREAK_IF(r->check_pending(r));
1855 /* Fill the ramrod data */
1856 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1857 struct ecore_exeq_elem) {
1858 cmd = elem->cmd_data.vlan_mac.cmd;
1859 /* We will add to the target object in MOVE command, so
1860 * change the object for a CAM search.
1862 if (cmd == ECORE_VLAN_MAC_MOVE)
1863 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1867 rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1873 ECORE_DBG_BREAK_IF(!reg_elem);
1875 /* Push a new entry into the registry */
1877 ((cmd == ECORE_VLAN_MAC_ADD) ||
1878 (cmd == ECORE_VLAN_MAC_MOVE)))
1879 ECORE_LIST_PUSH_HEAD(®_elem->link,
1882 /* Configure a single command in a ramrod data buffer */
1883 o->set_one_rule(sc, o, elem, idx,
1884 reg_elem->cam_offset);
1886 /* MOVE command consumes 2 entries in the ramrod data */
1887 if (cmd == ECORE_VLAN_MAC_MOVE)
1894 * No need for an explicit memory barrier here as long as we would
1895 * need to ensure the ordering of writing to the SPQ element
1896 * and updating of the SPQ producer which involves a memory
1897 * read and we will have to put a full memory barrier there
1898 * (inside ecore_sp_post()).
1901 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1903 ETH_CONNECTION_TYPE);
1908 /* Now, when we are done with the ramrod - clean up the registry */
1909 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1910 struct ecore_exeq_elem) {
1911 cmd = elem->cmd_data.vlan_mac.cmd;
1912 if ((cmd == ECORE_VLAN_MAC_DEL) ||
1913 (cmd == ECORE_VLAN_MAC_MOVE)) {
1914 reg_elem = o->check_del(sc, o,
1915 &elem->cmd_data.vlan_mac.u);
1917 ECORE_DBG_BREAK_IF(!reg_elem);
1919 o->put_cam_offset(o, reg_elem->cam_offset);
1920 ECORE_LIST_REMOVE_ENTRY(®_elem->link, &o->head);
1921 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1926 return ECORE_PENDING;
1928 return ECORE_SUCCESS;
1931 r->clear_pending(r);
1933 /* Cleanup a registry in case of a failure */
1934 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1935 struct ecore_exeq_elem) {
1936 cmd = elem->cmd_data.vlan_mac.cmd;
1938 if (cmd == ECORE_VLAN_MAC_MOVE)
1939 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1943 /* Delete all newly added above entries */
1945 ((cmd == ECORE_VLAN_MAC_ADD) ||
1946 (cmd == ECORE_VLAN_MAC_MOVE))) {
1947 reg_elem = o->check_del(sc, cam_obj,
1948 &elem->cmd_data.vlan_mac.u);
1950 ECORE_LIST_REMOVE_ENTRY(®_elem->link,
1952 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1960 static inline int ecore_vlan_mac_push_new_cmd(
1961 struct bxe_softc *sc,
1962 struct ecore_vlan_mac_ramrod_params *p)
1964 struct ecore_exeq_elem *elem;
1965 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1966 bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1968 /* Allocate the execution queue element */
1969 elem = ecore_exe_queue_alloc_elem(sc);
1973 /* Set the command 'length' */
1974 switch (p->user_req.cmd) {
1975 case ECORE_VLAN_MAC_MOVE:
1982 /* Fill the object specific info */
1983 ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1985 /* Try to add a new command to the pending list */
1986 return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1990 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1992 * @sc: device handle
1996 int ecore_config_vlan_mac(struct bxe_softc *sc,
1997 struct ecore_vlan_mac_ramrod_params *p)
1999 int rc = ECORE_SUCCESS;
2000 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
2001 unsigned long *ramrod_flags = &p->ramrod_flags;
2002 bool cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
2003 struct ecore_raw_obj *raw = &o->raw;
2006 * Add new elements to the execution list for commands that require it.
2009 rc = ecore_vlan_mac_push_new_cmd(sc, p);
2014 /* If nothing will be executed further in this iteration we want to
2015 * return PENDING if there are pending commands
2017 if (!ecore_exe_queue_empty(&o->exe_queue))
2020 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
2021 ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
2022 raw->clear_pending(raw);
2025 /* Execute commands if required */
2026 if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
2027 ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
2028 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
2034 /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
2035 * then the user wants to wait until the last command is done.
2037 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2038 /* Wait at most for the current exe_queue length iterations plus
2039 * one (for the current pending command).
2041 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
2043 while (!ecore_exe_queue_empty(&o->exe_queue) &&
2046 /* Wait for the current command to complete */
2047 rc = raw->wait_comp(sc, raw);
2051 /* Make a next step */
2052 rc = __ecore_vlan_mac_execute_step(sc,
2059 return ECORE_SUCCESS;
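/* Illustrative caller-side sketch (field and flag names follow the structures
 * used in this file; 'mac_obj' and 'addr' are assumed to exist): add a single
 * ETH MAC and wait for the ramrod to complete.
 *
 *	struct ecore_vlan_mac_ramrod_params p;
 *
 *	ECORE_MEMSET(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *	ECORE_MEMCPY(&p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	ECORE_SET_BIT_NA(ECORE_ETH_MAC, &p.user_req.vlan_mac_flags);
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = ecore_config_vlan_mac(sc, &p);
 */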
2066 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2068 * @sc: device handle
2071 * @ramrod_flags: execution flags to be used for this deletion
2073 * Returns 0 if the last operation has completed successfully and there are no
2074 * more elements left, a positive value if the last operation has completed
2075 * successfully and there are more previously configured elements, and a negative
2076 * value if the current operation has failed.
2078 static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
2079 struct ecore_vlan_mac_obj *o,
2080 unsigned long *vlan_mac_flags,
2081 unsigned long *ramrod_flags)
2083 struct ecore_vlan_mac_registry_elem *pos = NULL;
2084 struct ecore_vlan_mac_ramrod_params p;
2085 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
2086 struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
2090 /* Clear pending commands first */
2092 ECORE_SPIN_LOCK_BH(&exeq->lock);
2094 ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
2095 &exeq->exe_queue, link,
2096 struct ecore_exeq_elem) {
2097 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
2099 rc = exeq->remove(sc, exeq->owner, exeq_pos);
2101 ECORE_ERR("Failed to remove command\n");
2102 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2105 ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
2107 ecore_exe_queue_free_elem(sc, exeq_pos);
2111 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2113 /* Prepare a command request */
2114 ECORE_MEMSET(&p, 0, sizeof(p));
2116 p.ramrod_flags = *ramrod_flags;
2117 p.user_req.cmd = ECORE_VLAN_MAC_DEL;
2119 /* Add all but the last VLAN-MAC to the execution queue without actually
2120 * executing anything.
2122 ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
2123 ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
2124 ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2126 ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2127 read_lock = ecore_vlan_mac_h_read_lock(sc, o);
2128 if (read_lock != ECORE_SUCCESS)
2131 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
2132 struct ecore_vlan_mac_registry_elem) {
2133 if (pos->vlan_mac_flags == *vlan_mac_flags) {
2134 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2135 ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
2136 rc = ecore_config_vlan_mac(sc, &p);
2138 ECORE_ERR("Failed to add a new DEL command\n");
2139 ecore_vlan_mac_h_read_unlock(sc, o);
2145 ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2146 ecore_vlan_mac_h_read_unlock(sc, o);
2148 p.ramrod_flags = *ramrod_flags;
2149 ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2151 return ecore_config_vlan_mac(sc, &p);
2154 static inline void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
2155 uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping, int state,
2156 unsigned long *pstate, ecore_obj_type type)
2158 raw->func_id = func_id;
2162 raw->rdata_mapping = rdata_mapping;
2164 raw->pstate = pstate;
2165 raw->obj_type = type;
2166 raw->check_pending = ecore_raw_check_pending;
2167 raw->clear_pending = ecore_raw_clear_pending;
2168 raw->set_pending = ecore_raw_set_pending;
2169 raw->wait_comp = ecore_raw_wait;
2172 static inline void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
2173 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping,
2174 int state, unsigned long *pstate, ecore_obj_type type,
2175 struct ecore_credit_pool_obj *macs_pool,
2176 struct ecore_credit_pool_obj *vlans_pool)
2178 ECORE_LIST_INIT(&o->head);
2180 o->head_exe_request = FALSE;
2181 o->saved_ramrod_flags = 0;
2183 o->macs_pool = macs_pool;
2184 o->vlans_pool = vlans_pool;
2186 o->delete_all = ecore_vlan_mac_del_all;
2187 o->restore = ecore_vlan_mac_restore;
2188 o->complete = ecore_complete_vlan_mac;
2189 o->wait = ecore_wait_vlan_mac;
2191 ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2192 state, pstate, type);
2195 void ecore_init_mac_obj(struct bxe_softc *sc,
2196 struct ecore_vlan_mac_obj *mac_obj,
2197 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2198 ecore_dma_addr_t rdata_mapping, int state,
2199 unsigned long *pstate, ecore_obj_type type,
2200 struct ecore_credit_pool_obj *macs_pool)
2202 union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
2204 ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2205 rdata_mapping, state, pstate, type,
2208 /* CAM credit pool handling */
2209 mac_obj->get_credit = ecore_get_credit_mac;
2210 mac_obj->put_credit = ecore_put_credit_mac;
2211 mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2212 mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2214 if (CHIP_IS_E1x(sc)) {
2215 mac_obj->set_one_rule = ecore_set_one_mac_e1x;
2216 mac_obj->check_del = ecore_check_mac_del;
2217 mac_obj->check_add = ecore_check_mac_add;
2218 mac_obj->check_move = ecore_check_move_always_err;
2219 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2222 ecore_exe_queue_init(sc,
2223 &mac_obj->exe_queue, 1, qable_obj,
2224 ecore_validate_vlan_mac,
2225 ecore_remove_vlan_mac,
2226 ecore_optimize_vlan_mac,
2227 ecore_execute_vlan_mac,
2228 ecore_exeq_get_mac);
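/* The chunk length of 1 here matches the E1x flow, where rules are posted
 * one at a time via RAMROD_CMD_ID_ETH_SET_MAC; the E2 path below uses
 * CLASSIFY_RULES_COUNT so several classification rules can be aggregated
 * into a single ramrod.
 */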
2230 mac_obj->set_one_rule = ecore_set_one_mac_e2;
2231 mac_obj->check_del = ecore_check_mac_del;
2232 mac_obj->check_add = ecore_check_mac_add;
2233 mac_obj->check_move = ecore_check_move;
2234 mac_obj->ramrod_cmd =
2235 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2236 mac_obj->get_n_elements = ecore_get_n_elements;
2239 ecore_exe_queue_init(sc,
2240 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2241 qable_obj, ecore_validate_vlan_mac,
2242 ecore_remove_vlan_mac,
2243 ecore_optimize_vlan_mac,
2244 ecore_execute_vlan_mac,
2245 ecore_exeq_get_mac);
2249 void ecore_init_vlan_obj(struct bxe_softc *sc,
2250 struct ecore_vlan_mac_obj *vlan_obj,
2251 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2252 ecore_dma_addr_t rdata_mapping, int state,
2253 unsigned long *pstate, ecore_obj_type type,
2254 struct ecore_credit_pool_obj *vlans_pool)
2256 union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj;
2258 ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2259 rdata_mapping, state, pstate, type, NULL,
2262 vlan_obj->get_credit = ecore_get_credit_vlan;
2263 vlan_obj->put_credit = ecore_put_credit_vlan;
2264 vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan;
2265 vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan;
2267 if (CHIP_IS_E1x(sc)) {
2268 ECORE_ERR("Do not support chips others than E2 and newer\n");
2271 vlan_obj->set_one_rule = ecore_set_one_vlan_e2;
2272 vlan_obj->check_del = ecore_check_vlan_del;
2273 vlan_obj->check_add = ecore_check_vlan_add;
2274 vlan_obj->check_move = ecore_check_move;
2275 vlan_obj->ramrod_cmd =
2276 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2277 vlan_obj->get_n_elements = ecore_get_n_elements;
2280 ecore_exe_queue_init(sc,
2281 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2282 qable_obj, ecore_validate_vlan_mac,
2283 ecore_remove_vlan_mac,
2284 ecore_optimize_vlan_mac,
2285 ecore_execute_vlan_mac,
2286 ecore_exeq_get_vlan);
2290 void ecore_init_vlan_mac_obj(struct bxe_softc *sc,
2291 struct ecore_vlan_mac_obj *vlan_mac_obj,
2292 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2293 ecore_dma_addr_t rdata_mapping, int state,
2294 unsigned long *pstate, ecore_obj_type type,
2295 struct ecore_credit_pool_obj *macs_pool,
2296 struct ecore_credit_pool_obj *vlans_pool)
2298 union ecore_qable_obj *qable_obj =
2299 (union ecore_qable_obj *)vlan_mac_obj;
2301 ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2302 rdata_mapping, state, pstate, type,
2303 macs_pool, vlans_pool);
2305 /* CAM pool handling */
2306 vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
2307 vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2308 /* CAM offset is relevant for 57710 and 57711 chips only which have a
2309 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2310 * will be taken from MACs' pool object only.
2312 vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2313 vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2315 if (CHIP_IS_E1(sc)) {
2316 ECORE_ERR("Do not support chips others than E2\n");
2318 } else if (CHIP_IS_E1H(sc)) {
2319 vlan_mac_obj->set_one_rule = ecore_set_one_vlan_mac_e1h;
2320 vlan_mac_obj->check_del = ecore_check_vlan_mac_del;
2321 vlan_mac_obj->check_add = ecore_check_vlan_mac_add;
2322 vlan_mac_obj->check_move = ecore_check_move_always_err;
2323 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2326 ecore_exe_queue_init(sc,
2327 &vlan_mac_obj->exe_queue, 1, qable_obj,
2328 ecore_validate_vlan_mac,
2329 ecore_remove_vlan_mac,
2330 ecore_optimize_vlan_mac,
2331 ecore_execute_vlan_mac,
2332 ecore_exeq_get_vlan_mac);
2334 vlan_mac_obj->set_one_rule = ecore_set_one_vlan_mac_e2;
2335 vlan_mac_obj->check_del = ecore_check_vlan_mac_del;
2336 vlan_mac_obj->check_add = ecore_check_vlan_mac_add;
2337 vlan_mac_obj->check_move = ecore_check_move;
2338 vlan_mac_obj->ramrod_cmd =
2339 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2342 ecore_exe_queue_init(sc,
2343 &vlan_mac_obj->exe_queue,
2344 CLASSIFY_RULES_COUNT,
2345 qable_obj, ecore_validate_vlan_mac,
2346 ecore_remove_vlan_mac,
2347 ecore_optimize_vlan_mac,
2348 ecore_execute_vlan_mac,
2349 ecore_exeq_get_vlan_mac);
2353 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2354 static inline void __storm_memset_mac_filters(struct bxe_softc *sc,
2355 struct tstorm_eth_mac_filter_config *mac_filters,
2358 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2360 uint32_t addr = BAR_TSTRORM_INTMEM +
2361 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2363 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)mac_filters);
2366 static int ecore_set_rx_mode_e1x(struct bxe_softc *sc,
2367 struct ecore_rx_mode_ramrod_params *p)
2369 /* update the sc MAC filter structure */
2370 uint32_t mask = (1 << p->cl_id);
2372 struct tstorm_eth_mac_filter_config *mac_filters =
2373 (struct tstorm_eth_mac_filter_config *)p->rdata;
2375 /* initial setting is drop-all */
2376 uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
2377 uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2378 uint8_t unmatched_unicast = 0;
2380 /* In E1x we only take the RX accept flags into account since TX switching
2382 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
2383 /* accept matched ucast */
2386 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
2387 /* accept matched mcast */
2390 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2391 /* accept all ucast */
2395 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2396 /* accept all mcast */
2400 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
2401 /* accept (all) bcast */
2403 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2404 /* accept unmatched unicasts */
2405 unmatched_unicast = 1;
2407 mac_filters->ucast_drop_all = drop_all_ucast ?
2408 mac_filters->ucast_drop_all | mask :
2409 mac_filters->ucast_drop_all & ~mask;
2411 mac_filters->mcast_drop_all = drop_all_mcast ?
2412 mac_filters->mcast_drop_all | mask :
2413 mac_filters->mcast_drop_all & ~mask;
2415 mac_filters->ucast_accept_all = accp_all_ucast ?
2416 mac_filters->ucast_accept_all | mask :
2417 mac_filters->ucast_accept_all & ~mask;
2419 mac_filters->mcast_accept_all = accp_all_mcast ?
2420 mac_filters->mcast_accept_all | mask :
2421 mac_filters->mcast_accept_all & ~mask;
2423 mac_filters->bcast_accept_all = accp_all_bcast ?
2424 mac_filters->bcast_accept_all | mask :
2425 mac_filters->bcast_accept_all & ~mask;
2427 mac_filters->unmatched_unicast = unmatched_unicast ?
2428 mac_filters->unmatched_unicast | mask :
2429 mac_filters->unmatched_unicast & ~mask;
2431 ECORE_MSG(sc, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2432 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2433 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2434 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2435 mac_filters->bcast_accept_all);
2437 /* write the MAC filter structure */
2438 __storm_memset_mac_filters(sc, mac_filters, p->func_id);
2440 /* The operation is completed */
2441 ECORE_CLEAR_BIT(p->state, p->pstate);
2442 ECORE_SMP_MB_AFTER_CLEAR_BIT();
2444 return ECORE_SUCCESS;
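/* Unlike the E2 flow below, the E1x rx_mode is applied by a direct write
 * into TSTORM internal memory rather than by posting a ramrod, which is
 * why the pending bit is cleared right away and ECORE_SUCCESS (not
 * ECORE_PENDING) is returned.
 */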
2447 /* Setup ramrod data */
2448 static inline void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid,
2449 struct eth_classify_header *hdr,
2452 hdr->echo = ECORE_CPU_TO_LE32(cid);
2453 hdr->rule_cnt = rule_cnt;
2456 static inline void ecore_rx_mode_set_cmd_state_e2(struct bxe_softc *sc,
2457 unsigned long *accept_flags,
2458 struct eth_filter_rules_cmd *cmd,
2459 bool clear_accept_all)
2463 /* start with 'drop-all' */
2464 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2465 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2467 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2468 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2470 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2471 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2473 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2474 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2475 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2478 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2479 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2480 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2482 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2483 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2485 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2486 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2487 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2489 if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2490 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2492 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2493 if (clear_accept_all) {
2494 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2495 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2496 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2497 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2500 cmd->state = ECORE_CPU_TO_LE16(state);
2503 static int ecore_set_rx_mode_e2(struct bxe_softc *sc,
2504 struct ecore_rx_mode_ramrod_params *p)
2506 struct eth_filter_rules_ramrod_data *data = p->rdata;
2508 uint8_t rule_idx = 0;
2510 /* Reset the ramrod data buffer */
2511 ECORE_MEMSET(data, 0, sizeof(*data));
2513 /* Setup ramrod data */
2515 /* Tx (internal switching) */
2516 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2517 data->rules[rule_idx].client_id = p->cl_id;
2518 data->rules[rule_idx].func_id = p->func_id;
2520 data->rules[rule_idx].cmd_general_data =
2521 ETH_FILTER_RULES_CMD_TX_CMD;
2523 ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
2524 &(data->rules[rule_idx++]),
2529 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2530 data->rules[rule_idx].client_id = p->cl_id;
2531 data->rules[rule_idx].func_id = p->func_id;
2533 data->rules[rule_idx].cmd_general_data =
2534 ETH_FILTER_RULES_CMD_RX_CMD;
2536 ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
2537 &(data->rules[rule_idx++]),
2541 /* If FCoE Queue configuration has been requested configure the Rx and
2542 * internal switching modes for this queue in separate rules.
2544 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2545 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2547 if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2548 /* Tx (internal switching) */
2549 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2550 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2551 data->rules[rule_idx].func_id = p->func_id;
2553 data->rules[rule_idx].cmd_general_data =
2554 ETH_FILTER_RULES_CMD_TX_CMD;
2556 ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
2557 &(data->rules[rule_idx]),
2563 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2564 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2565 data->rules[rule_idx].func_id = p->func_id;
2567 data->rules[rule_idx].cmd_general_data =
2568 ETH_FILTER_RULES_CMD_RX_CMD;
2570 ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
2571 &(data->rules[rule_idx]),
2577 /* Set the ramrod header (most importantly - number of rules to
2580 ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2582 ECORE_MSG(sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2583 data->header.rule_cnt, p->rx_accept_flags,
2584 p->tx_accept_flags);
2586 /* No need for an explicit memory barrier here as long as we
2587 * ensure the ordering of writing to the SPQ element
2588 * and updating of the SPQ producer, which involves a memory
2589 * read, because a full memory barrier is put there
2590 * (inside ecore_sp_post()).
2594 rc = ecore_sp_post(sc,
2595 RAMROD_CMD_ID_ETH_FILTER_RULES,
2598 ETH_CONNECTION_TYPE);
2602 /* Ramrod completion is pending */
2603 return ECORE_PENDING;
2606 static int ecore_wait_rx_mode_comp_e2(struct bxe_softc *sc,
2607 struct ecore_rx_mode_ramrod_params *p)
2609 return ecore_state_wait(sc, p->state, p->pstate);
2612 static int ecore_empty_rx_mode_wait(struct bxe_softc *sc,
2613 struct ecore_rx_mode_ramrod_params *p)
2616 return ECORE_SUCCESS;
2619 int ecore_config_rx_mode(struct bxe_softc *sc,
2620 struct ecore_rx_mode_ramrod_params *p)
2624 /* Configure the new classification in the chip */
2625 rc = p->rx_mode_obj->config_rx_mode(sc, p);
2629 /* Wait for a ramrod completion if was requested */
2630 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2631 rc = p->rx_mode_obj->wait_comp(sc, p);
2639 void ecore_init_rx_mode_obj(struct bxe_softc *sc,
2640 struct ecore_rx_mode_obj *o)
2642 if (CHIP_IS_E1x(sc)) {
2643 o->wait_comp = ecore_empty_rx_mode_wait;
2644 o->config_rx_mode = ecore_set_rx_mode_e1x;
2646 o->wait_comp = ecore_wait_rx_mode_comp_e2;
2647 o->config_rx_mode = ecore_set_rx_mode_e2;
2651 /********************* Multicast verbs: SET, CLEAR ****************************/
2652 static inline uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
2654 return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
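/* Several multicast MACs may hash to the same bin, which is why the E1H
 * and E2 flows below track bins (approximate match) rather than exact MAC
 * entries: bits 31:24 of the little-endian CRC32 of the 6-byte MAC select
 * one of 256 bins in the approximate-match vector.
 */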
2657 struct ecore_mcast_mac_elem {
2658 ecore_list_entry_t link;
2659 uint8_t mac[ETH_ALEN];
2660 uint8_t pad[2]; /* For a natural alignment of the following buffer */
2663 struct ecore_pending_mcast_cmd {
2664 ecore_list_entry_t link;
2665 int type; /* ECORE_MCAST_CMD_X */
2667 ecore_list_t macs_head;
2668 uint32_t macs_num; /* Needed for DEL command */
2669 int next_bin; /* Needed for RESTORE flow with aprox match */
2672 bool done; /* set to TRUE, when the command has been handled,
2673 * practically used in 57712 handling only, where one pending
2674 * command may be handled in a few operations. As long as for
2675 * other chips every operation handling is completed in a
2676 * single ramrod, there is no need to utilize this field.
2680 static int ecore_mcast_wait(struct bxe_softc *sc,
2681 struct ecore_mcast_obj *o)
2683 if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2684 o->raw.wait_comp(sc, &o->raw))
2685 return ECORE_TIMEOUT;
2687 return ECORE_SUCCESS;
2690 static int ecore_mcast_enqueue_cmd(struct bxe_softc *sc,
2691 struct ecore_mcast_obj *o,
2692 struct ecore_mcast_ramrod_params *p,
2693 enum ecore_mcast_cmd cmd)
2696 struct ecore_pending_mcast_cmd *new_cmd;
2697 struct ecore_mcast_mac_elem *cur_mac = NULL;
2698 struct ecore_mcast_list_elem *pos;
2699 int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2700 p->mcast_list_len : 0);
2702 /* If the command is empty ("handle pending commands only"), break */
2703 if (!p->mcast_list_len)
2704 return ECORE_SUCCESS;
2706 total_sz = sizeof(*new_cmd) +
2707 macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2709 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2710 new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2715 ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d\n",
2716 cmd, macs_list_len);
2718 ECORE_LIST_INIT(&new_cmd->data.macs_head);
2720 new_cmd->type = cmd;
2721 new_cmd->done = FALSE;
2724 case ECORE_MCAST_CMD_ADD:
2725 cur_mac = (struct ecore_mcast_mac_elem *)
2726 ((uint8_t *)new_cmd + sizeof(*new_cmd));
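/* The command header and its MAC array live in one allocation: total_sz
 * above is sizeof(*new_cmd) plus macs_list_len elements, so cur_mac simply
 * points just past the header and is advanced once per copied MAC.
 */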
2728 /* Push the MACs of the current command into the pending command
2731 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2732 struct ecore_mcast_list_elem) {
2733 ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2734 ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2735 &new_cmd->data.macs_head);
2741 case ECORE_MCAST_CMD_DEL:
2742 new_cmd->data.macs_num = p->mcast_list_len;
2745 case ECORE_MCAST_CMD_RESTORE:
2746 new_cmd->data.next_bin = 0;
2750 ECORE_FREE(sc, new_cmd, total_sz);
2751 ECORE_ERR("Unknown command: %d\n", cmd);
2755 /* Push the new pending command to the tail of the pending list: FIFO */
2756 ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2760 return ECORE_PENDING;
2764 * ecore_mcast_get_next_bin - get the next set bin (index)
2767 * @last: index to start looking from (inclusive)
2769 * returns the next found (set) bin or a negative value if none is found.
2771 static inline int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2773 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2775 for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2776 if (o->registry.aprox_match.vec[i])
2777 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2778 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2779 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2792 * ecore_mcast_clear_first_bin - find the first set bin and clear it
2796 * returns the index of the found bin or -1 if none is found
2798 static inline int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2800 int cur_bit = ecore_mcast_get_next_bin(o, 0);
2803 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2808 static inline uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2810 struct ecore_raw_obj *raw = &o->raw;
2811 uint8_t rx_tx_flag = 0;
2813 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2814 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2815 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2817 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2818 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2819 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2824 static void ecore_mcast_set_one_rule_e2(struct bxe_softc *sc,
2825 struct ecore_mcast_obj *o, int idx,
2826 union ecore_mcast_config_data *cfg_data,
2827 enum ecore_mcast_cmd cmd)
2829 struct ecore_raw_obj *r = &o->raw;
2830 struct eth_multicast_rules_ramrod_data *data =
2831 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2832 uint8_t func_id = r->func_id;
2833 uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2836 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2837 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2839 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2841 /* Get a bin and update the bins vector */
2843 case ECORE_MCAST_CMD_ADD:
2844 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2845 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2848 case ECORE_MCAST_CMD_DEL:
2849 /* If there were no more bins to clear
2850 * (ecore_mcast_clear_first_bin() returns -1) then we would
2851 * clear any (0xff) bin.
2852 * See ecore_mcast_validate_e2() for explanation when it may
2855 bin = ecore_mcast_clear_first_bin(o);
2858 case ECORE_MCAST_CMD_RESTORE:
2859 bin = cfg_data->bin;
2863 ECORE_ERR("Unknown command: %d\n", cmd);
2867 ECORE_MSG(sc, "%s bin %d\n",
2868 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2869 "Setting" : "Clearing"), bin);
2871 data->rules[idx].bin_id = (uint8_t)bin;
2872 data->rules[idx].func_id = func_id;
2873 data->rules[idx].engine_id = o->engine_id;
2877 * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2879 * @sc: device handle
2881 * @start_bin: index in the registry to start from (inclusive)
2882 * @rdata_idx: index in the ramrod data to start from
2884 * returns last handled bin index or -1 if all bins have been handled
2886 static inline int ecore_mcast_handle_restore_cmd_e2(
2887 struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_bin,
2890 int cur_bin, cnt = *rdata_idx;
2891 union ecore_mcast_config_data cfg_data = {NULL};
2893 /* go through the registry and configure the bins from it */
2894 for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2895 cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2897 cfg_data.bin = (uint8_t)cur_bin;
2898 o->set_one_rule(sc, o, cnt, &cfg_data,
2899 ECORE_MCAST_CMD_RESTORE);
2903 ECORE_MSG(sc, "About to configure a bin %d\n", cur_bin);
2905 /* Break if we reached the maximum number
2908 if (cnt >= o->max_cmd_len)
2917 static inline void ecore_mcast_hdl_pending_add_e2(struct bxe_softc *sc,
2918 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2921 struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2922 int cnt = *line_idx;
2923 union ecore_mcast_config_data cfg_data = {NULL};
2925 ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2926 &cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) {
2928 cfg_data.mac = &pmac_pos->mac[0];
2929 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2933 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
2934 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2936 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2937 &cmd_pos->data.macs_head);
2939 /* Break if we reached the maximum number
2942 if (cnt >= o->max_cmd_len)
2948 /* if no more MACs to configure - we are done */
2949 if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2950 cmd_pos->done = TRUE;
2953 static inline void ecore_mcast_hdl_pending_del_e2(struct bxe_softc *sc,
2954 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2957 int cnt = *line_idx;
2959 while (cmd_pos->data.macs_num) {
2960 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2964 cmd_pos->data.macs_num--;
2966 ECORE_MSG(sc, "Deleting MAC. %d left,cnt is %d\n",
2967 cmd_pos->data.macs_num, cnt);
2969 /* Break if we reached the maximum
2972 if (cnt >= o->max_cmd_len)
2978 /* If we cleared all bins - we are done */
2979 if (!cmd_pos->data.macs_num)
2980 cmd_pos->done = TRUE;
2983 static inline void ecore_mcast_hdl_pending_restore_e2(struct bxe_softc *sc,
2984 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2987 cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2990 if (cmd_pos->data.next_bin < 0)
2991 /* If o->set_restore returned -1 we are done */
2992 cmd_pos->done = TRUE;
2994 /* Start from the next bin next time */
2995 cmd_pos->data.next_bin++;
2998 static inline int ecore_mcast_handle_pending_cmds_e2(struct bxe_softc *sc,
2999 struct ecore_mcast_ramrod_params *p)
3001 struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
3003 struct ecore_mcast_obj *o = p->mcast_obj;
3005 ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
3006 &o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) {
3007 switch (cmd_pos->type) {
3008 case ECORE_MCAST_CMD_ADD:
3009 ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
3012 case ECORE_MCAST_CMD_DEL:
3013 ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
3016 case ECORE_MCAST_CMD_RESTORE:
3017 ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
3022 ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3026 /* If the command has been completed - remove it from the list
3027 * and free the memory
3029 if (cmd_pos->done) {
3030 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
3031 &o->pending_cmds_head);
3032 ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3035 /* Break if we reached the maximum number of rules */
3036 if (cnt >= o->max_cmd_len)
3043 static inline void ecore_mcast_hdl_add(struct bxe_softc *sc,
3044 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3047 struct ecore_mcast_list_elem *mlist_pos;
3048 union ecore_mcast_config_data cfg_data = {NULL};
3049 int cnt = *line_idx;
3051 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3052 struct ecore_mcast_list_elem) {
3053 cfg_data.mac = mlist_pos->mac;
3054 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
3058 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3059 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
3065 static inline void ecore_mcast_hdl_del(struct bxe_softc *sc,
3066 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3069 int cnt = *line_idx, i;
3071 for (i = 0; i < p->mcast_list_len; i++) {
3072 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
3076 ECORE_MSG(sc, "Deleting MAC. %d left\n",
3077 p->mcast_list_len - i - 1);
3084 * ecore_mcast_handle_current_cmd -
3086 * @sc: device handle
3089 * @start_cnt: first line in the ramrod data that may be used
3091 * This function is called iff there is enough room for the current command in
3093 * Returns number of lines filled in the ramrod data in total.
3095 static inline int ecore_mcast_handle_current_cmd(struct bxe_softc *sc,
3096 struct ecore_mcast_ramrod_params *p,
3097 enum ecore_mcast_cmd cmd,
3100 struct ecore_mcast_obj *o = p->mcast_obj;
3101 int cnt = start_cnt;
3103 ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3106 case ECORE_MCAST_CMD_ADD:
3107 ecore_mcast_hdl_add(sc, o, p, &cnt);
3110 case ECORE_MCAST_CMD_DEL:
3111 ecore_mcast_hdl_del(sc, o, p, &cnt);
3114 case ECORE_MCAST_CMD_RESTORE:
3115 o->hdl_restore(sc, o, 0, &cnt);
3119 ECORE_ERR("Unknown command: %d\n", cmd);
3123 /* The current command has been handled */
3124 p->mcast_list_len = 0;
3129 static int ecore_mcast_validate_e2(struct bxe_softc *sc,
3130 struct ecore_mcast_ramrod_params *p,
3131 enum ecore_mcast_cmd cmd)
3133 struct ecore_mcast_obj *o = p->mcast_obj;
3134 int reg_sz = o->get_registry_size(o);
3137 /* DEL command deletes all currently configured MACs */
3138 case ECORE_MCAST_CMD_DEL:
3139 o->set_registry_size(o, 0);
3142 /* RESTORE command will restore the entire multicast configuration */
3143 case ECORE_MCAST_CMD_RESTORE:
3144 /* Here we set the approximate amount of work to do, which in
3145 * fact may turn out to be less, as some MACs in postponed ADD
3146 * command(s) scheduled before this command may fall into
3147 * the same bin and the actual number of bins set in the
3148 * registry would be less than we estimated here. See
3149 * ecore_mcast_set_one_rule_e2() for further details.
3151 p->mcast_list_len = reg_sz;
3154 case ECORE_MCAST_CMD_ADD:
3155 case ECORE_MCAST_CMD_CONT:
3156 /* Here we assume that all new MACs will fall into new bins.
3157 * However we will correct the real registry size after we
3158 * handle all pending commands.
3160 o->set_registry_size(o, reg_sz + p->mcast_list_len);
3164 ECORE_ERR("Unknown command: %d\n", cmd);
3168 /* Increase the total number of MACs pending to be configured */
3169 o->total_pending_num += p->mcast_list_len;
3171 return ECORE_SUCCESS;
3174 static void ecore_mcast_revert_e2(struct bxe_softc *sc,
3175 struct ecore_mcast_ramrod_params *p,
3178 struct ecore_mcast_obj *o = p->mcast_obj;
3180 o->set_registry_size(o, old_num_bins);
3181 o->total_pending_num -= p->mcast_list_len;
3185 * ecore_mcast_set_rdata_hdr_e2 - sets a header values
3187 * @sc: device handle
3189 * @len: number of rules to handle
3191 static inline void ecore_mcast_set_rdata_hdr_e2(struct bxe_softc *sc,
3192 struct ecore_mcast_ramrod_params *p,
3195 struct ecore_raw_obj *r = &p->mcast_obj->raw;
3196 struct eth_multicast_rules_ramrod_data *data =
3197 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
3199 data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3200 (ECORE_FILTER_MCAST_PENDING <<
3201 ECORE_SWCID_SHIFT));
3202 data->header.rule_cnt = len;
3206 * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3208 * @sc: device handle
3211 * Recalculate the actual number of set bins in the registry using Brian
3212 * Kernighan's algorithm: its execution complexity is proportional to the number of set bins.
3214 * returns 0 for the compliance with ecore_mcast_refresh_registry_e1().
3216 static inline int ecore_mcast_refresh_registry_e2(struct bxe_softc *sc,
3217 struct ecore_mcast_obj *o)
3222 for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
3223 elem = o->registry.aprox_match.vec[i];
3228 o->set_registry_size(o, cnt);
3230 return ECORE_SUCCESS;
3233 static int ecore_mcast_setup_e2(struct bxe_softc *sc,
3234 struct ecore_mcast_ramrod_params *p,
3235 enum ecore_mcast_cmd cmd)
3237 struct ecore_raw_obj *raw = &p->mcast_obj->raw;
3238 struct ecore_mcast_obj *o = p->mcast_obj;
3239 struct eth_multicast_rules_ramrod_data *data =
3240 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
3243 /* Reset the ramrod data buffer */
3244 ECORE_MEMSET(data, 0, sizeof(*data));
3246 cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
3248 /* If there are no more pending commands - clear SCHEDULED state */
3249 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3252 /* The below may be TRUE iff there was enough room in ramrod
3253 * data for all pending commands and for the current
3254 * command. Otherwise the current command would have been added
3255 * to the pending commands and p->mcast_list_len would have been
3258 if (p->mcast_list_len > 0)
3259 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
3261 /* We've pulled out some MACs - update the total number of
3264 o->total_pending_num -= cnt;
3267 ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
3268 ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
3270 ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t)cnt);
3272 /* Update a registry size if there are no more pending operations.
3274 * We don't want to change the value of the registry size if there are
3275 * pending operations because we want it to always be equal to the
3276 * exact or the approximate number (see ecore_mcast_validate_e2()) of
3277 * set bins after the last requested operation in order to properly
3278 * evaluate the size of the next DEL/RESTORE operation.
3280 * Note that we update the registry itself during command(s) handling
3281 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
3282 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3283 * with a limited amount of update commands (per MAC/bin) and we don't
3284 * know in this scope what the actual state of bins configuration is
3285 * going to be after this ramrod.
3287 if (!o->total_pending_num)
3288 ecore_mcast_refresh_registry_e2(sc, o);
3290 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3291 * RAMROD_PENDING status immediately.
3293 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3294 raw->clear_pending(raw);
3295 return ECORE_SUCCESS;
3297 /* No need for an explicit memory barrier here as long as we
3298 * ensure the ordering of writing to the SPQ element
3299 * and updating of the SPQ producer, which involves a memory
3300 * read, because a full memory barrier is put there
3301 * (inside ecore_sp_post()).
3305 rc = ecore_sp_post( sc,
3306 RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3309 ETH_CONNECTION_TYPE);
3313 /* Ramrod completion is pending */
3314 return ECORE_PENDING;
3318 static int ecore_mcast_validate_e1h(struct bxe_softc *sc,
3319 struct ecore_mcast_ramrod_params *p,
3320 enum ecore_mcast_cmd cmd)
3322 /* Mark that there is work to do */
3323 if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
3324 p->mcast_list_len = 1;
3326 return ECORE_SUCCESS;
3329 static void ecore_mcast_revert_e1h(struct bxe_softc *sc,
3330 struct ecore_mcast_ramrod_params *p,
3336 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
3338 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3341 static inline void ecore_mcast_hdl_add_e1h(struct bxe_softc *sc,
3342 struct ecore_mcast_obj *o,
3343 struct ecore_mcast_ramrod_params *p,
3344 uint32_t *mc_filter)
3346 struct ecore_mcast_list_elem *mlist_pos;
3349 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3350 struct ecore_mcast_list_elem) {
3351 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
3352 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3354 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n",
3355 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit);
3357 /* bookkeeping... */
3358 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3363 static inline void ecore_mcast_hdl_restore_e1h(struct bxe_softc *sc,
3364 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3365 uint32_t *mc_filter)
3369 for (bit = ecore_mcast_get_next_bin(o, 0);
3371 bit = ecore_mcast_get_next_bin(o, bit + 1)) {
3372 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3373 ECORE_MSG(sc, "About to set bin %d\n", bit);
3377 /* On 57711 we write the multicast MACs' approximate match
3378 * table directly into the TSTORM's internal RAM. So we don't
3379 * really need to handle any tricks to make it work.
3381 static int ecore_mcast_setup_e1h(struct bxe_softc *sc,
3382 struct ecore_mcast_ramrod_params *p,
3383 enum ecore_mcast_cmd cmd)
3386 struct ecore_mcast_obj *o = p->mcast_obj;
3387 struct ecore_raw_obj *r = &o->raw;
3389 /* If CLEAR_ONLY has been requested - clear the registry
3390 * and clear a pending bit.
3392 if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3393 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = {0};
3395 /* Set the multicast filter bits before writing it into
3396 * the internal memory.
3399 case ECORE_MCAST_CMD_ADD:
3400 ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
3403 case ECORE_MCAST_CMD_DEL:
3405 "Invalidating multicast MACs configuration\n");
3407 /* clear the registry */
3408 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3409 sizeof(o->registry.aprox_match.vec));
3412 case ECORE_MCAST_CMD_RESTORE:
3413 ecore_mcast_hdl_restore_e1h(sc, o, p, mc_filter);
3417 ECORE_ERR("Unknown command: %d\n", cmd);
3421 /* Set the mcast filter in the internal memory */
3422 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3423 REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3425 /* clear the registry */
3426 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3427 sizeof(o->registry.aprox_match.vec));
3430 r->clear_pending(r);
3432 return ECORE_SUCCESS;
3435 static int ecore_mcast_validate_e1(struct bxe_softc *sc,
3436 struct ecore_mcast_ramrod_params *p,
3437 enum ecore_mcast_cmd cmd)
3439 struct ecore_mcast_obj *o = p->mcast_obj;
3440 int reg_sz = o->get_registry_size(o);
3443 /* DEL command deletes all currently configured MACs */
3444 case ECORE_MCAST_CMD_DEL:
3445 o->set_registry_size(o, 0);
3448 /* RESTORE command will restore the entire multicast configuration */
3449 case ECORE_MCAST_CMD_RESTORE:
3450 p->mcast_list_len = reg_sz;
3451 ECORE_MSG(sc, "Command %d, p->mcast_list_len=%d\n",
3452 cmd, p->mcast_list_len);
3455 case ECORE_MCAST_CMD_ADD:
3456 case ECORE_MCAST_CMD_CONT:
3457 /* Multicast MACs on 57710 are configured as unicast MACs and
3458 * there is only a limited number of CAM entries for that
3461 if (p->mcast_list_len > o->max_cmd_len) {
3462 ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n",
3466 /* Every configured MAC should be cleared if DEL command is
3467 * called. Only the last ADD command is relevant as long as
3468 * every ADD command overrides the previous configuration.
3470 ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3471 if (p->mcast_list_len > 0)
3472 o->set_registry_size(o, p->mcast_list_len);
3477 ECORE_ERR("Unknown command: %d\n", cmd);
3481 /* We want to ensure that commands are executed one by one for 57710.
3482 * Therefore each non-empty command will consume o->max_cmd_len.
3484 if (p->mcast_list_len)
3485 o->total_pending_num += o->max_cmd_len;
3487 return ECORE_SUCCESS;
3490 static void ecore_mcast_revert_e1(struct bxe_softc *sc,
3491 struct ecore_mcast_ramrod_params *p,
3494 struct ecore_mcast_obj *o = p->mcast_obj;
3496 o->set_registry_size(o, old_num_macs);
3498 /* If the current command hasn't been handled yet and we are
3499 * here, it means that it's meant to be dropped and we have to
3500 * update the number of outstanding MACs accordingly.
3502 if (p->mcast_list_len)
3503 o->total_pending_num -= o->max_cmd_len;
3506 static void ecore_mcast_set_one_rule_e1(struct bxe_softc *sc,
3507 struct ecore_mcast_obj *o, int idx,
3508 union ecore_mcast_config_data *cfg_data,
3509 enum ecore_mcast_cmd cmd)
3511 struct ecore_raw_obj *r = &o->raw;
3512 struct mac_configuration_cmd *data =
3513 (struct mac_configuration_cmd *)(r->rdata);
3516 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) {
3517 ecore_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3518 &data->config_table[idx].middle_mac_addr,
3519 &data->config_table[idx].lsb_mac_addr,
3522 data->config_table[idx].vlan_id = 0;
3523 data->config_table[idx].pf_id = r->func_id;
3524 data->config_table[idx].clients_bit_vector =
3525 ECORE_CPU_TO_LE32(1 << r->cl_id);
3527 ECORE_SET_FLAG(data->config_table[idx].flags,
3528 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3529 T_ETH_MAC_COMMAND_SET);
3534 * ecore_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3536 * @sc: device handle
3538 * @len: number of rules to handle
3540 static inline void ecore_mcast_set_rdata_hdr_e1(struct bxe_softc *sc,
3541 struct ecore_mcast_ramrod_params *p,
3544 struct ecore_raw_obj *r = &p->mcast_obj->raw;
3545 struct mac_configuration_cmd *data =
3546 (struct mac_configuration_cmd *)(r->rdata);
3548 uint8_t offset = (CHIP_REV_IS_SLOW(sc) ?
3549 ECORE_MAX_EMUL_MULTI*(1 + r->func_id) :
3550 ECORE_MAX_MULTICAST*(1 + r->func_id));
3552 data->hdr.offset = offset;
3553 data->hdr.client_id = ECORE_CPU_TO_LE16(0xff);
3554 data->hdr.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3555 (ECORE_FILTER_MCAST_PENDING <<
3556 ECORE_SWCID_SHIFT));
3557 data->hdr.length = len;
3561 * ecore_mcast_handle_restore_cmd_e1 - restore command for 57710
3563 * @sc: device handle
3565 * @start_idx: index in the registry to start from
3566 * @rdata_idx: index in the ramrod data to start from
3568 * restore command for 57710 is like all other commands - always a stand-alone
3569 * command - start_idx and rdata_idx will always be 0. This function will always
3571 * returns -1 to comply with 57712 variant.
3573 static inline int ecore_mcast_handle_restore_cmd_e1(
3574 struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_idx,
3577 struct ecore_mcast_mac_elem *elem;
3579 union ecore_mcast_config_data cfg_data = {NULL};
3581 /* go through the registry and configure the MACs from it. */
3582 ECORE_LIST_FOR_EACH_ENTRY(elem, &o->registry.exact_match.macs, link,
3583 struct ecore_mcast_mac_elem) {
3584 cfg_data.mac = &elem->mac[0];
3585 o->set_one_rule(sc, o, i, &cfg_data, ECORE_MCAST_CMD_RESTORE);
3589 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3590 cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]);
3598 static inline int ecore_mcast_handle_pending_cmds_e1(
3599 struct bxe_softc *sc, struct ecore_mcast_ramrod_params *p)
3601 struct ecore_pending_mcast_cmd *cmd_pos;
3602 struct ecore_mcast_mac_elem *pmac_pos;
3603 struct ecore_mcast_obj *o = p->mcast_obj;
3604 union ecore_mcast_config_data cfg_data = {NULL};
3607 /* If nothing to be done - return */
3608 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3611 /* Handle the first command */
3612 cmd_pos = ECORE_LIST_FIRST_ENTRY(&o->pending_cmds_head,
3613 struct ecore_pending_mcast_cmd, link);
3615 switch (cmd_pos->type) {
3616 case ECORE_MCAST_CMD_ADD:
3617 ECORE_LIST_FOR_EACH_ENTRY(pmac_pos, &cmd_pos->data.macs_head,
3618 link, struct ecore_mcast_mac_elem) {
3619 cfg_data.mac = &pmac_pos->mac[0];
3620 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
3624 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3625 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
3629 case ECORE_MCAST_CMD_DEL:
3630 cnt = cmd_pos->data.macs_num;
3631 ECORE_MSG(sc, "About to delete %d multicast MACs\n", cnt);
3634 case ECORE_MCAST_CMD_RESTORE:
3635 o->hdl_restore(sc, o, 0, &cnt);
3639 ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3643 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, &o->pending_cmds_head);
3644 ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3650 * ecore_get_fw_mac_addr - revert the ecore_set_fw_mac_addr().
3657 static inline void ecore_get_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
3658 uint16_t *fw_lo, uint8_t *mac)
3660 mac[1] = ((uint8_t *)fw_hi)[0];
3661 mac[0] = ((uint8_t *)fw_hi)[1];
3662 mac[3] = ((uint8_t *)fw_mid)[0];
3663 mac[2] = ((uint8_t *)fw_mid)[1];
3664 mac[5] = ((uint8_t *)fw_lo)[0];
3665 mac[4] = ((uint8_t *)fw_lo)[1];
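/* Each 16-bit firmware word (hi/mid/lo) holds two MAC bytes in swapped
 * order, so the pairs are swapped back here to rebuild the canonical
 * mac[0..5] layout; this mirrors ecore_set_fw_mac_addr().
 */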
3669 * ecore_mcast_refresh_registry_e1 -
3671 * @sc: device handle
3674 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3675 * and update the registry correspondingly: if ADD - allocate memory and add
3676 * the entries to the registry (list), if DELETE - clear the registry and free
3679 static inline int ecore_mcast_refresh_registry_e1(struct bxe_softc *sc,
3680 struct ecore_mcast_obj *o)
3682 struct ecore_raw_obj *raw = &o->raw;
3683 struct ecore_mcast_mac_elem *elem;
3684 struct mac_configuration_cmd *data =
3685 (struct mac_configuration_cmd *)(raw->rdata);
3687 /* If first entry contains a SET bit - the command was ADD,
3688 * otherwise - DEL_ALL
3690 if (ECORE_GET_FLAG(data->config_table[0].flags,
3691 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3692 int i, len = data->hdr.length;
3694 /* Break if it was a RESTORE command */
3695 if (!ECORE_LIST_IS_EMPTY(&o->registry.exact_match.macs))
3696 return ECORE_SUCCESS;
3698 elem = ECORE_CALLOC(len, sizeof(*elem), GFP_ATOMIC, sc);
3700 ECORE_ERR("Failed to allocate registry memory\n");
3704 for (i = 0; i < len; i++, elem++) {
3705 ecore_get_fw_mac_addr(
3706 &data->config_table[i].msb_mac_addr,
3707 &data->config_table[i].middle_mac_addr,
3708 &data->config_table[i].lsb_mac_addr,
3710 ECORE_MSG(sc, "Adding registry entry for [%02x:%02x:%02x:%02x:%02x:%02x]\n",
3711 elem->mac[0], elem->mac[1], elem->mac[2], elem->mac[3], elem->mac[4], elem->mac[5]);
3712 ECORE_LIST_PUSH_TAIL(&elem->link,
3713 &o->registry.exact_match.macs);
3716 elem = ECORE_LIST_FIRST_ENTRY(&o->registry.exact_match.macs,
3717 struct ecore_mcast_mac_elem,
3719 ECORE_MSG(sc, "Deleting a registry\n");
3720 ECORE_FREE(sc, elem, sizeof(*elem));
3721 ECORE_LIST_INIT(&o->registry.exact_match.macs);
3724 return ECORE_SUCCESS;
3727 static int ecore_mcast_setup_e1(struct bxe_softc *sc,
3728 struct ecore_mcast_ramrod_params *p,
3729 enum ecore_mcast_cmd cmd)
3731 struct ecore_mcast_obj *o = p->mcast_obj;
3732 struct ecore_raw_obj *raw = &o->raw;
3733 struct mac_configuration_cmd *data =
3734 (struct mac_configuration_cmd *)(raw->rdata);
3737 /* Reset the ramrod data buffer */
3738 ECORE_MEMSET(data, 0, sizeof(*data));
3740 /* First set all entries as invalid */
3741 for (i = 0; i < o->max_cmd_len ; i++)
3742 ECORE_SET_FLAG(data->config_table[i].flags,
3743 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3744 T_ETH_MAC_COMMAND_INVALIDATE);
3746 /* Handle pending commands first */
3747 cnt = ecore_mcast_handle_pending_cmds_e1(sc, p);
3749 /* If there are no more pending commands - clear SCHEDULED state */
3750 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3753 /* The below may be TRUE iff there were no pending commands */
3755 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, 0);
3757 /* For 57710 every command has o->max_cmd_len length to ensure that
3758 * commands are done one at a time.
3760 o->total_pending_num -= o->max_cmd_len;
3764 ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
3766 /* Set ramrod header (in particular, a number of entries to update) */
3767 ecore_mcast_set_rdata_hdr_e1(sc, p, (uint8_t)cnt);
3769 /* update a registry: we need the registry contents to be always up
3770 * to date in order to be able to execute a RESTORE opcode. Here
3771 * we use the fact that for 57710 we sent one command at a time
3772 * hence we may take the registry update out of the command handling
3773 * and do it in a simpler way here.
3775 rc = ecore_mcast_refresh_registry_e1(sc, o);
3779 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3780 * RAMROD_PENDING status immediately.
3782 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3783 raw->clear_pending(raw);
3784 return ECORE_SUCCESS;
3786 /* No need for an explicit memory barrier here as long as we
3787 * ensure the ordering of writing to the SPQ element
3788 * and updating of the SPQ producer, which involves a memory
3789 * read, because a full memory barrier is put there
3790 * (inside ecore_sp_post()).
3794 rc = ecore_sp_post( sc,
3795 RAMROD_CMD_ID_ETH_SET_MAC,
3798 ETH_CONNECTION_TYPE);
3802 /* Ramrod completion is pending */
3803 return ECORE_PENDING;
3807 static int ecore_mcast_get_registry_size_exact(struct ecore_mcast_obj *o)
3809 return o->registry.exact_match.num_macs_set;
3812 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3814 return o->registry.aprox_match.num_bins_set;
3817 static void ecore_mcast_set_registry_size_exact(struct ecore_mcast_obj *o,
3820 o->registry.exact_match.num_macs_set = n;
3823 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3826 o->registry.aprox_match.num_bins_set = n;
3829 int ecore_config_mcast(struct bxe_softc *sc,
3830 struct ecore_mcast_ramrod_params *p,
3831 enum ecore_mcast_cmd cmd)
3833 struct ecore_mcast_obj *o = p->mcast_obj;
3834 struct ecore_raw_obj *r = &o->raw;
3835 int rc = 0, old_reg_size;
3837 /* This is needed to recover number of currently configured mcast macs
3838 * in case of failure.
3840 old_reg_size = o->get_registry_size(o);
3842 /* Do some calculations and checks */
3843 rc = o->validate(sc, p, cmd);
3847 /* Return if there is no work to do */
3848 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3849 return ECORE_SUCCESS;
3851 ECORE_MSG(sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3852 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3854 /* Enqueue the current command to the pending list if we can't complete
3855 * it in the current iteration
3857 if (r->check_pending(r) ||
3858 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3859 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3863 /* As long as the current command is in a command list we
3864 * don't need to handle it separately.
3866 p->mcast_list_len = 0;
3869 if (!r->check_pending(r)) {
3871 /* Set 'pending' state */
3874 /* Configure the new classification in the chip */
3875 rc = o->config_mcast(sc, p, cmd);
3879 /* Wait for a ramrod completion if was requested */
3880 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3881 rc = o->wait_comp(sc, o);
3887 r->clear_pending(r);
3890 o->revert(sc, p, old_reg_size);
3895 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3897 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3898 ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3899 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3902 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3904 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3905 ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3906 ECORE_SMP_MB_AFTER_CLEAR_BIT();
3909 static bool ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3911 return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3914 static bool ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3916 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3919 void ecore_init_mcast_obj(struct bxe_softc *sc,
3920 struct ecore_mcast_obj *mcast_obj,
3921 uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
3922 uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
3923 int state, unsigned long *pstate, ecore_obj_type type)
3925 ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3927 ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3928 rdata, rdata_mapping, state, pstate, type);
3930 mcast_obj->engine_id = engine_id;
3932 ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3934 mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3935 mcast_obj->check_sched = ecore_mcast_check_sched;
3936 mcast_obj->set_sched = ecore_mcast_set_sched;
3937 mcast_obj->clear_sched = ecore_mcast_clear_sched;
3939 if (CHIP_IS_E1(sc)) {
3940 mcast_obj->config_mcast = ecore_mcast_setup_e1;
3941 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
3942 mcast_obj->hdl_restore =
3943 ecore_mcast_handle_restore_cmd_e1;
3944 mcast_obj->check_pending = ecore_mcast_check_pending;
3946 if (CHIP_REV_IS_SLOW(sc))
3947 mcast_obj->max_cmd_len = ECORE_MAX_EMUL_MULTI;
3949 mcast_obj->max_cmd_len = ECORE_MAX_MULTICAST;
3951 mcast_obj->wait_comp = ecore_mcast_wait;
3952 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e1;
3953 mcast_obj->validate = ecore_mcast_validate_e1;
3954 mcast_obj->revert = ecore_mcast_revert_e1;
3955 mcast_obj->get_registry_size =
3956 ecore_mcast_get_registry_size_exact;
3957 mcast_obj->set_registry_size =
3958 ecore_mcast_set_registry_size_exact;
3960 /* 57710 is the only chip that uses the exact match for mcast
3963 ECORE_LIST_INIT(&mcast_obj->registry.exact_match.macs);
3965 } else if (CHIP_IS_E1H(sc)) {
3966 mcast_obj->config_mcast = ecore_mcast_setup_e1h;
3967 mcast_obj->enqueue_cmd = NULL;
3968 mcast_obj->hdl_restore = NULL;
3969 mcast_obj->check_pending = ecore_mcast_check_pending;
3971 /* 57711 doesn't send a ramrod, so it has unlimited credit
3974 mcast_obj->max_cmd_len = -1;
3975 mcast_obj->wait_comp = ecore_mcast_wait;
3976 mcast_obj->set_one_rule = NULL;
3977 mcast_obj->validate = ecore_mcast_validate_e1h;
3978 mcast_obj->revert = ecore_mcast_revert_e1h;
3979 mcast_obj->get_registry_size =
3980 ecore_mcast_get_registry_size_aprox;
3981 mcast_obj->set_registry_size =
3982 ecore_mcast_set_registry_size_aprox;
3984 mcast_obj->config_mcast = ecore_mcast_setup_e2;
3985 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
3986 mcast_obj->hdl_restore =
3987 ecore_mcast_handle_restore_cmd_e2;
3988 mcast_obj->check_pending = ecore_mcast_check_pending;
3989 /* TODO: There should be a proper HSI define for this number!!!
3991 mcast_obj->max_cmd_len = 16;
3992 mcast_obj->wait_comp = ecore_mcast_wait;
3993 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
3994 mcast_obj->validate = ecore_mcast_validate_e2;
3995 mcast_obj->revert = ecore_mcast_revert_e2;
3996 mcast_obj->get_registry_size =
3997 ecore_mcast_get_registry_size_aprox;
3998 mcast_obj->set_registry_size =
3999 ecore_mcast_set_registry_size_aprox;
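/* To summarize the three flows wired up above: 57710 (E1) posts one exact
 * match SET_MAC ramrod per command, 57711 (E1H) writes the approximate
 * match table straight into internal RAM with no ramrod (hence the -1,
 * i.e. unlimited, max_cmd_len), and 57712 and newer (E2) aggregate pending
 * commands into MULTICAST_RULES ramrods of at most max_cmd_len rules.
 */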
4003 /*************************** Credit handling **********************************/
4006 * atomic_add_ifless - add if the result is less than a given value.
4008 * @v: pointer of type ecore_atomic_t
4009 * @a: the amount to add to v...
4010 * @u: ...if (v + a) is less than u.
4012 * returns TRUE if (v + a) was less than u, and FALSE otherwise.
4015 static inline bool __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
4019 c = ECORE_ATOMIC_READ(v);
4021 if (ECORE_UNLIKELY(c + a >= u))
4024 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
4025 if (ECORE_LIKELY(old == c))
4034 * atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
4036 * @v: pointer of type ecore_atomic_t
4037 * @a: the amount to dec from v...
4038 * @u: ...if (v - a) is greater than or equal to u.
4040 * returns TRUE if (v - a) was greater than or equal to u, and FALSE
4043 static inline bool __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
4047 c = ECORE_ATOMIC_READ(v);
4049 if (ECORE_UNLIKELY(c - a < u))
4052 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
4053 if (ECORE_LIKELY(old == c))
4061 static bool ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
4066 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4072 static bool ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
4078 /* Don't allow a refill if credit + cnt > pool_sz */
4079 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4086 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
4091 cur_credit = ECORE_ATOMIC_READ(&o->credit);
4096 static bool ecore_credit_pool_always_TRUE(struct ecore_credit_pool_obj *o,
4102 static bool ecore_credit_pool_get_entry(
4103 struct ecore_credit_pool_obj *o,
4110 /* Find "internal cam-offset" then add to base for this object... */
4111 for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
4113 /* Skip the current vector if there are no free entries in it */
4114 if (!o->pool_mirror[vec])
4117 /* If we've got here we are going to find a free entry */
4118 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
4119 i < BIT_VEC64_ELEM_SZ; idx++, i++)
4121 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4123 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4124 *offset = o->base_pool_offset + idx;
4132 static bool ecore_credit_pool_put_entry(
4133 struct ecore_credit_pool_obj *o,
4136 if (offset < o->base_pool_offset)
4139 offset -= o->base_pool_offset;
4141 if (offset >= o->pool_sz)
4144 /* Return the entry to the pool */
4145 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4150 static bool ecore_credit_pool_put_entry_always_TRUE(
4151 struct ecore_credit_pool_obj *o,
4157 static bool ecore_credit_pool_get_entry_always_TRUE(
4158 struct ecore_credit_pool_obj *o,
4165 * ecore_init_credit_pool - initialize credit pool internals.
4168 * @base: Base entry in the CAM to use.
4169 * @credit: pool size.
4171 * If base is negative no CAM entries handling will be performed.
4172 * If credit is negative pool operations will always succeed (unlimited pool).
4175 static inline void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
4176 int base, int credit)
4178 /* Zero the object first */
4179 ECORE_MEMSET(p, 0, sizeof(*p));
4181 /* Set the table to all 1s */
4182 ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4184 /* Init a pool as full */
4185 ECORE_ATOMIC_SET(&p->credit, credit);
4187 /* The total pool size */
4188 p->pool_sz = credit;
4190 p->base_pool_offset = base;
4192 /* Commit the change */
4195 p->check = ecore_credit_pool_check;
4197 /* if pool credit is negative - disable the checks */
4199 p->put = ecore_credit_pool_put;
4200 p->get = ecore_credit_pool_get;
4201 p->put_entry = ecore_credit_pool_put_entry;
4202 p->get_entry = ecore_credit_pool_get_entry;
4204 p->put = ecore_credit_pool_always_TRUE;
4205 p->get = ecore_credit_pool_always_TRUE;
4206 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4207 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4210 /* If base is negative - disable entries handling */
4212 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4213 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4217 void ecore_init_mac_credit_pool(struct bxe_softc *sc,
4218 struct ecore_credit_pool_obj *p, uint8_t func_id,
4221 /* TODO: this will be defined in consts as well... */
4222 #define ECORE_CAM_SIZE_EMUL 5
4226 if (CHIP_IS_E1(sc)) {
4227 /* In E1, Multicast is saved in cam... */
4228 if (!CHIP_REV_IS_SLOW(sc))
4229 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - ECORE_MAX_MULTICAST;
4231 cam_sz = ECORE_CAM_SIZE_EMUL - ECORE_MAX_EMUL_MULTI;
4233 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4235 } else if (CHIP_IS_E1H(sc)) {
4236 /* CAM credit is equally divided between all active functions
4239 if ((func_num > 0)) {
4240 if (!CHIP_REV_IS_SLOW(sc))
4241 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4243 cam_sz = ECORE_CAM_SIZE_EMUL;
4244 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4246 /* this should never happen! Block MAC operations. */
4247 ecore_init_credit_pool(p, 0, 0);
4253 * CAM credit is equally divided between all active functions
4256 if ((func_num > 1)) {
4257 if (!CHIP_REV_IS_SLOW(sc))
4258 cam_sz = (MAX_MAC_CREDIT_E2
4259 - GET_NUM_VFS_PER_PATH(sc))
4261 + GET_NUM_VFS_PER_PF(sc);
4263 cam_sz = ECORE_CAM_SIZE_EMUL;
4265 /* No need for CAM entries handling for 57712 and newer */
4268 ecore_init_credit_pool(p, -1, cam_sz);
4269 } else if (func_num == 1) {
4270 if (!CHIP_REV_IS_SLOW(sc))
4271 cam_sz = MAX_MAC_CREDIT_E2;
4273 cam_sz = ECORE_CAM_SIZE_EMUL;
4275 /* No need for CAM entries handling for 57712 and newer */
4278 ecore_init_credit_pool(p, -1, cam_sz);
4280 /* this should never happen! Block MAC operations. */
4281 ecore_init_credit_pool(p, 0, 0);
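/*
 * Worked example for the split above, with hypothetical numbers (the real
 * MAX_MAC_CREDIT_* values live in the headers): if MAX_MAC_CREDIT_E1H were
 * 256 and 8 functions were active, each function would get
 * 256 / (2 * 8) = 16 CAM entries, function N owning offsets
 * [N * 16, N * 16 + 15] via ecore_init_credit_pool(p, N * 16, 16).
 */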
4286 void ecore_init_vlan_credit_pool(struct bxe_softc *sc,
4287 struct ecore_credit_pool_obj *p,
4291 if (CHIP_IS_E1x(sc)) {
4292 /* There is no VLAN credit in HW on 57710 and 57711; only
4293 * MAC / MAC-VLAN can be set
4295 ecore_init_credit_pool(p, 0, -1);
4297 /* CAM credit is equally divided between all active functions
4301 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4302 ecore_init_credit_pool(p, func_id * credit, credit);
4304 /* this should never happen! Block VLAN operations. */
4305 ecore_init_credit_pool(p, 0, 0);
4309 /****************** RSS Configuration ******************/
4312 * ecore_setup_rss - configure RSS
4314 * @sc: device handle
4315 * @p: rss configuration
4317 * Sends an RSS UPDATE ramrod to apply the configuration.
4319 static int ecore_setup_rss(struct bxe_softc *sc,
4320 struct ecore_config_rss_params *p)
4322 struct ecore_rss_config_obj *o = p->rss_obj;
4323 struct ecore_raw_obj *r = &o->raw;
4324 struct eth_rss_update_ramrod_data *data =
4325 (struct eth_rss_update_ramrod_data *)(r->rdata);
4326 uint8_t rss_mode = 0;
4329 ECORE_MEMSET(data, 0, sizeof(*data));
4331 ECORE_MSG(sc, "Configuring RSS\n");
4333 /* Set an echo field */
4334 data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
4335 (r->state << ECORE_SWCID_SHIFT));
4338 if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
4339 rss_mode = ETH_RSS_MODE_DISABLED;
4340 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
4341 rss_mode = ETH_RSS_MODE_REGULAR;
4342 #if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! BNX2X_UPSTREAM */
4343 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_ESX51, &p->rss_flags))
4344 rss_mode = ETH_RSS_MODE_ESX51;
4347 data->rss_mode = rss_mode;
4349 ECORE_MSG(sc, "rss_mode=%d\n", rss_mode);
4351 /* RSS capabilities */
4352 if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
4353 data->capabilities |=
4354 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4356 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
4357 data->capabilities |=
4358 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4360 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
4361 data->capabilities |=
4362 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4364 if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
4365 data->capabilities |=
4366 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4368 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
4369 data->capabilities |=
4370 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4372 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
4373 data->capabilities |=
4374 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4376 if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
4377 data->udp_4tuple_dst_port_mask = ECORE_CPU_TO_LE16(p->tunnel_mask);
4378 data->udp_4tuple_dst_port_value =
4379 ECORE_CPU_TO_LE16(p->tunnel_value);
4383 data->rss_result_mask = p->rss_result_mask;
4386 data->rss_engine_id = o->engine_id;
4388 ECORE_MSG(sc, "rss_engine_id=%d\n", data->rss_engine_id);
4390 /* Indirection table */
4391 ECORE_MEMCPY(data->indirection_table, p->ind_table,
4392 T_ETH_INDIRECTION_TABLE_SIZE);
4394 /* Remember the last configuration */
4395 ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4399 if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
4400 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
4401 sizeof(data->rss_key));
4402 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4405 /* No need for an explicit memory barrier here as long as we would
4406 * need to ensure the ordering of writing to the SPQ element
4407 * and updating of the SPQ producer which involves a memory
4408 * read and we will have to put a full memory barrier there
4409 * (inside ecore_sp_post()).
4413 rc = ecore_sp_post(sc,
4414 RAMROD_CMD_ID_ETH_RSS_UPDATE,
4417 ETH_CONNECTION_TYPE);
4422 return ECORE_PENDING;
4425 void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
4428 ECORE_MEMCPY(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4431 int ecore_config_rss(struct bxe_softc *sc,
4432 struct ecore_config_rss_params *p)
4435 struct ecore_rss_config_obj *o = p->rss_obj;
4436 struct ecore_raw_obj *r = &o->raw;
4438 /* Do nothing if only driver cleanup was requested */
4439 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4440 ECORE_MSG(sc, "Not configuring RSS ramrod_flags=%lx\n",
4442 return ECORE_SUCCESS;
4447 rc = o->config_rss(sc, p);
4449 r->clear_pending(r);
4453 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
4454 rc = r->wait_comp(sc, r);
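/*
 * A hedged caller-side sketch (not driver code) of driving the RSS flow
 * above: fill an ecore_config_rss_params with the desired mode/capability
 * bits and hand it to ecore_config_rss().  The sketch_* name is
 * hypothetical, and the caller is assumed to own a suitably sized
 * indirection table, hash key and hash-result mask.
 */
static int
sketch_config_basic_rss(struct bxe_softc *sc,
			struct ecore_rss_config_obj *rss_obj,
			const uint8_t *ind_table, const uint32_t *rss_key,
			uint8_t result_mask)
{
	struct ecore_config_rss_params params;

	ECORE_MEMSET(&params, 0, sizeof(params));
	params.rss_obj = rss_obj;

	/* Regular RSS, hashing IPv4/IPv6 and their TCP 4-tuples */
	ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
	ECORE_SET_BIT(ECORE_RSS_IPV4, &params.rss_flags);
	ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &params.rss_flags);
	ECORE_SET_BIT(ECORE_RSS_IPV6, &params.rss_flags);
	ECORE_SET_BIT(ECORE_RSS_IPV6_TCP, &params.rss_flags);

	/* Push a new hash key along with this update */
	ECORE_SET_BIT(ECORE_RSS_SET_SRCH, &params.rss_flags);
	ECORE_MEMCPY(&params.rss_key[0], rss_key, sizeof(params.rss_key));

	params.rss_result_mask = result_mask;
	ECORE_MEMCPY(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	/* Block until the RSS UPDATE ramrod completes */
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return (ecore_config_rss(sc, &params));
}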
4459 void ecore_init_rss_config_obj(struct bxe_softc *sc,
4460 struct ecore_rss_config_obj *rss_obj,
4461 uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
4462 void *rdata, ecore_dma_addr_t rdata_mapping,
4463 int state, unsigned long *pstate,
4464 ecore_obj_type type)
4466 ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4467 rdata_mapping, state, pstate, type);
4469 rss_obj->engine_id = engine_id;
4470 rss_obj->config_rss = ecore_setup_rss;
4473 int validate_vlan_mac(struct bxe_softc *sc,
4474 struct ecore_vlan_mac_obj *vlan_mac)
4476 if (!vlan_mac->get_n_elements) {
4477 ECORE_ERR("vlan mac object was not intialized\n");
4483 /********************** Queue state object ***********************************/
4486 * ecore_queue_state_change - perform Queue state change transition
4488 * @sc: device handle
4489 * @params: parameters to perform the transition
4491 * returns 0 in case of successfully completed transition, negative error
4492 * code in case of failure, positive (EBUSY) value if there is a completion
4493 * that is still pending (possible only if RAMROD_COMP_WAIT is
4494 * not set in params->ramrod_flags for asynchronous commands).
4497 int ecore_queue_state_change(struct bxe_softc *sc,
4498 struct ecore_queue_state_params *params)
4500 struct ecore_queue_sp_obj *o = params->q_obj;
4501 int rc, pending_bit;
4502 unsigned long *pending = &o->pending;
4504 /* Check that the requested transition is legal */
4505 rc = o->check_transition(sc, o, params);
4507 ECORE_ERR("check transition returned an error. rc %d\n", rc);
4511 /* Set "pending" bit */
4512 ECORE_MSG(sc, "pending bit was=%lx\n", o->pending);
4513 pending_bit = o->set_pending(o, params);
4514 ECORE_MSG(sc, "pending bit now=%lx\n", o->pending);
4516 /* Don't send a command if only driver cleanup was requested */
4517 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4518 o->complete_cmd(sc, o, pending_bit);
4521 rc = o->send_cmd(sc, params);
4523 o->next_state = ECORE_Q_STATE_MAX;
4524 ECORE_CLEAR_BIT(pending_bit, pending);
4525 ECORE_SMP_MB_AFTER_CLEAR_BIT();
4529 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4530 rc = o->wait_comp(sc, o, pending_bit);
4534 return ECORE_SUCCESS;
4538 return ECORE_RET_PENDING(pending_bit, pending);
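/*
 * A hedged usage sketch (not driver code): issuing a synchronous HALT to a
 * queue through the generic flow above (check_transition, set_pending,
 * send_cmd, wait_comp).  The sketch_* name is hypothetical; "sc" and
 * "q_obj" are assumed to be an initialized softc and queue object.
 */
static int
sketch_halt_queue(struct bxe_softc *sc, struct ecore_queue_sp_obj *q_obj)
{
	struct ecore_queue_state_params params;

	ECORE_MEMSET(&params, 0, sizeof(params));
	params.q_obj = q_obj;
	params.cmd = ECORE_Q_CMD_HALT;

	/* Sleep until the HALT ramrod completion arrives */
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return (ecore_queue_state_change(sc, &params));
}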
4541 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
4542 struct ecore_queue_state_params *params)
4544 enum ecore_queue_cmd cmd = params->cmd, bit;
4546 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4549 if ((cmd == ECORE_Q_CMD_ACTIVATE) ||
4550 (cmd == ECORE_Q_CMD_DEACTIVATE))
4551 bit = ECORE_Q_CMD_UPDATE;
4555 ECORE_SET_BIT(bit, &obj->pending);
4559 static int ecore_queue_wait_comp(struct bxe_softc *sc,
4560 struct ecore_queue_sp_obj *o,
4561 enum ecore_queue_cmd cmd)
4563 return ecore_state_wait(sc, cmd, &o->pending);
4567 * ecore_queue_comp_cmd - complete the state change command.
4569 * @sc: device handle
4573 * Checks that the arrived completion is expected.
4575 static int ecore_queue_comp_cmd(struct bxe_softc *sc,
4576 struct ecore_queue_sp_obj *o,
4577 enum ecore_queue_cmd cmd)
4579 unsigned long cur_pending = o->pending;
4581 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4582 ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4583 cmd, o->cids[ECORE_PRIMARY_CID_INDEX],
4584 o->state, cur_pending, o->next_state);
4588 if (o->next_tx_only >= o->max_cos)
4589 /* >= because the number of tx-only connections must always be smaller
4590 * than max_cos, since the primary connection supports COS 0
4592 ECORE_ERR("illegal value for next tx_only: %d. max cos was %d",
4593 o->next_tx_only, o->max_cos);
4596 "Completing command %d for queue %d, setting state to %d\n",
4597 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
4599 if (o->next_tx_only) /* print num tx-only if any exist */
4600 ECORE_MSG(sc, "primary cid %d: num tx-only cons %d\n",
4601 o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
4603 o->state = o->next_state;
4604 o->num_tx_only = o->next_tx_only;
4605 o->next_state = ECORE_Q_STATE_MAX;
4607 /* It's important that o->state and o->next_state are
4608 * updated before o->pending.
4612 ECORE_CLEAR_BIT(cmd, &o->pending);
4613 ECORE_SMP_MB_AFTER_CLEAR_BIT();
4615 return ECORE_SUCCESS;
4618 static void ecore_q_fill_setup_data_e2(struct bxe_softc *sc,
4619 struct ecore_queue_state_params *cmd_params,
4620 struct client_init_ramrod_data *data)
4622 struct ecore_queue_setup_params *params = &cmd_params->params.setup;
4626 /* IPv6 TPA supported for E2 and above only */
4627 data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
4629 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4632 static void ecore_q_fill_init_general_data(struct bxe_softc *sc,
4633 struct ecore_queue_sp_obj *o,
4634 struct ecore_general_setup_params *params,
4635 struct client_init_general_data *gen_data,
4636 unsigned long *flags)
4638 gen_data->client_id = o->cl_id;
4640 if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
4641 gen_data->statistics_counter_id =
4643 gen_data->statistics_en_flg = 1;
4644 gen_data->statistics_zero_flg =
4645 ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
4647 gen_data->statistics_counter_id =
4648 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4650 gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE,
4652 gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4654 gen_data->sp_client_id = params->spcl_id;
4655 gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
4656 gen_data->func_id = o->func_id;
4658 gen_data->cos = params->cos;
4660 gen_data->traffic_type =
4661 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
4662 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4664 ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d\n",
4665 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4668 static void ecore_q_fill_init_tx_data(struct ecore_queue_sp_obj *o,
4669 struct ecore_txq_setup_params *params,
4670 struct client_init_tx_data *tx_data,
4671 unsigned long *flags)
4673 tx_data->enforce_security_flg =
4674 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
4675 tx_data->default_vlan =
4676 ECORE_CPU_TO_LE16(params->default_vlan);
4677 tx_data->default_vlan_flg =
4678 ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
4679 tx_data->tx_switching_flg =
4680 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
4681 tx_data->anti_spoofing_flg =
4682 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
4683 tx_data->force_default_pri_flg =
4684 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
4685 tx_data->refuse_outband_vlan_flg =
4686 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
4687 tx_data->tunnel_lso_inc_ip_id =
4688 ECORE_TEST_BIT(ECORE_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4689 tx_data->tunnel_non_lso_pcsum_location =
4690 ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
4693 tx_data->tx_status_block_id = params->fw_sb_id;
4694 tx_data->tx_sb_index_number = params->sb_cq_index;
4695 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4697 tx_data->tx_bd_page_base.lo =
4698 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
4699 tx_data->tx_bd_page_base.hi =
4700 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
4702 /* Don't configure any Tx switching mode during queue SETUP */
4706 static void ecore_q_fill_init_pause_data(struct ecore_queue_sp_obj *o,
4707 struct rxq_pause_params *params,
4708 struct client_init_rx_data *rx_data)
4710 /* flow control data */
4711 rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
4712 rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
4713 rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
4714 rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
4715 rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
4716 rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
4717 rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
4720 static void ecore_q_fill_init_rx_data(struct ecore_queue_sp_obj *o,
4721 struct ecore_rxq_setup_params *params,
4722 struct client_init_rx_data *rx_data,
4723 unsigned long *flags)
4725 rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
4726 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4727 rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
4728 CLIENT_INIT_RX_DATA_TPA_MODE;
4729 rx_data->vmqueue_mode_en_flg = 0;
4731 rx_data->extra_data_over_sgl_en_flg =
4732 ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
4733 rx_data->cache_line_alignment_log_size =
4734 params->cache_line_log;
4735 rx_data->enable_dynamic_hc =
4736 ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
4737 rx_data->max_sges_for_packet = params->max_sges_pkt;
4738 rx_data->client_qzone_id = params->cl_qzone_id;
4739 rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
4741 /* Always start in DROP_ALL mode */
4742 rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4743 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4745 /* We don't set drop flags */
4746 rx_data->drop_ip_cs_err_flg = 0;
4747 rx_data->drop_tcp_cs_err_flg = 0;
4748 rx_data->drop_ttl0_flg = 0;
4749 rx_data->drop_udp_cs_err_flg = 0;
4750 rx_data->inner_vlan_removal_enable_flg =
4751 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
4752 rx_data->outer_vlan_removal_enable_flg =
4753 ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
4754 rx_data->status_block_id = params->fw_sb_id;
4755 rx_data->rx_sb_index_number = params->sb_cq_index;
4756 rx_data->max_tpa_queues = params->max_tpa_queues;
4757 rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
4758 rx_data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buf_sz);
4759 rx_data->bd_page_base.lo =
4760 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
4761 rx_data->bd_page_base.hi =
4762 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
4763 rx_data->sge_page_base.lo =
4764 ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
4765 rx_data->sge_page_base.hi =
4766 ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
4767 rx_data->cqe_page_base.lo =
4768 ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
4769 rx_data->cqe_page_base.hi =
4770 ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
4771 rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
4774 if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
4775 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4776 rx_data->is_approx_mcast = 1;
4779 rx_data->rss_engine_id = params->rss_engine_id;
4781 /* silent vlan removal */
4782 rx_data->silent_vlan_removal_flg =
4783 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
4784 rx_data->silent_vlan_value =
4785 ECORE_CPU_TO_LE16(params->silent_removal_value);
4786 rx_data->silent_vlan_mask =
4787 ECORE_CPU_TO_LE16(params->silent_removal_mask);
4790 /* initialize the general, tx and rx parts of a queue object */
4791 static void ecore_q_fill_setup_data_cmn(struct bxe_softc *sc,
4792 struct ecore_queue_state_params *cmd_params,
4793 struct client_init_ramrod_data *data)
4795 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4796 &cmd_params->params.setup.gen_params,
4798 &cmd_params->params.setup.flags);
4800 ecore_q_fill_init_tx_data(cmd_params->q_obj,
4801 &cmd_params->params.setup.txq_params,
4803 &cmd_params->params.setup.flags);
4805 ecore_q_fill_init_rx_data(cmd_params->q_obj,
4806 &cmd_params->params.setup.rxq_params,
4808 &cmd_params->params.setup.flags);
4810 ecore_q_fill_init_pause_data(cmd_params->q_obj,
4811 &cmd_params->params.setup.pause_params,
4815 /* initialize the general and tx parts of a tx-only queue object */
4816 static void ecore_q_fill_setup_tx_only(struct bxe_softc *sc,
4817 struct ecore_queue_state_params *cmd_params,
4818 struct tx_queue_init_ramrod_data *data)
4820 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4821 &cmd_params->params.tx_only.gen_params,
4823 &cmd_params->params.tx_only.flags);
4825 ecore_q_fill_init_tx_data(cmd_params->q_obj,
4826 &cmd_params->params.tx_only.txq_params,
4828 &cmd_params->params.tx_only.flags);
4830 ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
4831 cmd_params->q_obj->cids[0],
4832 data->tx.tx_bd_page_base.lo,
4833 data->tx.tx_bd_page_base.hi);
4837 * ecore_q_init - init HW/FW queue
4839 * @sc: device handle
4842 * HW/FW initial Queue configuration:
4844 * - CDU context validation
4847 static inline int ecore_q_init(struct bxe_softc *sc,
4848 struct ecore_queue_state_params *params)
4850 struct ecore_queue_sp_obj *o = params->q_obj;
4851 struct ecore_queue_init_params *init = &params->params.init;
4855 /* Tx HC configuration */
4856 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
4857 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
4858 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4860 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
4861 init->tx.sb_cq_index,
4862 !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->tx.flags),
4866 /* Rx HC configuration */
4867 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
4868 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
4869 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4871 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
4872 init->rx.sb_cq_index,
4873 !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->rx.flags),
4877 /* Set CDU context validation values */
4878 for (cos = 0; cos < o->max_cos; cos++) {
4879 ECORE_MSG(sc, "setting context validation. cid %d, cos %d\n",
4881 ECORE_MSG(sc, "context pointer %p\n", init->cxts[cos]);
4882 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
4885 /* As no ramrod is sent, complete the command immediately */
4886 o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
4891 return ECORE_SUCCESS;
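/*
 * Worked example for the coalescing math above: a requested hc_rate of
 * 5000 interrupts/sec yields hc_usec = 1000000 / 5000 = 200 microseconds
 * between status-block updates, while hc_rate == 0 disables coalescing
 * (hc_usec = 0).
 */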
4894 static inline int ecore_q_send_setup_e1x(struct bxe_softc *sc,
4895 struct ecore_queue_state_params *params)
4897 struct ecore_queue_sp_obj *o = params->q_obj;
4898 struct client_init_ramrod_data *rdata =
4899 (struct client_init_ramrod_data *)o->rdata;
4900 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4901 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4903 /* Clear the ramrod data */
4904 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4906 /* Fill the ramrod data */
4907 ecore_q_fill_setup_data_cmn(sc, params, rdata);
4909 /* No need for an explicit memory barrier here as long as we would
4910 * need to ensure the ordering of writing to the SPQ element
4911 * and updating of the SPQ producer which involves a memory
4912 * read and we will have to put a full memory barrier there
4913 * (inside ecore_sp_post()).
4916 return ecore_sp_post(sc,
4918 o->cids[ECORE_PRIMARY_CID_INDEX],
4920 ETH_CONNECTION_TYPE);
4923 static inline int ecore_q_send_setup_e2(struct bxe_softc *sc,
4924 struct ecore_queue_state_params *params)
4926 struct ecore_queue_sp_obj *o = params->q_obj;
4927 struct client_init_ramrod_data *rdata =
4928 (struct client_init_ramrod_data *)o->rdata;
4929 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4930 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4932 /* Clear the ramrod data */
4933 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4935 /* Fill the ramrod data */
4936 ecore_q_fill_setup_data_cmn(sc, params, rdata);
4937 ecore_q_fill_setup_data_e2(sc, params, rdata);
4939 /* No need for an explicit memory barrier here as long as we would
4940 * need to ensure the ordering of writing to the SPQ element
4941 * and updating of the SPQ producer which involves a memory
4942 * read and we will have to put a full memory barrier there
4943 * (inside ecore_sp_post()).
4946 return ecore_sp_post(sc,
4948 o->cids[ECORE_PRIMARY_CID_INDEX],
4950 ETH_CONNECTION_TYPE);
4953 static inline int ecore_q_send_setup_tx_only(struct bxe_softc *sc,
4954 struct ecore_queue_state_params *params)
4956 struct ecore_queue_sp_obj *o = params->q_obj;
4957 struct tx_queue_init_ramrod_data *rdata =
4958 (struct tx_queue_init_ramrod_data *)o->rdata;
4959 ecore_dma_addr_t data_mapping = o->rdata_mapping;
4960 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4961 struct ecore_queue_setup_tx_only_params *tx_only_params =
4962 &params->params.tx_only;
4963 uint8_t cid_index = tx_only_params->cid_index;
4965 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type))
4966 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4967 ECORE_MSG(sc, "sending forward tx-only ramrod");
4969 if (cid_index >= o->max_cos) {
4970 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
4971 o->cl_id, cid_index);
4975 ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d\n",
4976 tx_only_params->gen_params.cos,
4977 tx_only_params->gen_params.spcl_id);
4979 /* Clear the ramrod data */
4980 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4982 /* Fill the ramrod data */
4983 ecore_q_fill_setup_tx_only(sc, params, rdata);
4985 ECORE_MSG(sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4986 o->cids[cid_index], rdata->general.client_id,
4987 rdata->general.sp_client_id, rdata->general.cos);
4989 /* No need for an explicit memory barrier here as long as we would
4990 * need to ensure the ordering of writing to the SPQ element
4991 * and updating of the SPQ producer which involves a memory
4992 * read and we will have to put a full memory barrier there
4993 * (inside ecore_sp_post()).
4996 return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4997 data_mapping, ETH_CONNECTION_TYPE);
5000 static void ecore_q_fill_update_data(struct bxe_softc *sc,
5001 struct ecore_queue_sp_obj *obj,
5002 struct ecore_queue_update_params *params,
5003 struct client_update_ramrod_data *data)
5005 /* Client ID of the client to update */
5006 data->client_id = obj->cl_id;
5008 /* Function ID of the client to update */
5009 data->func_id = obj->func_id;
5011 /* Default VLAN value */
5012 data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
5014 /* Inner VLAN stripping */
5015 data->inner_vlan_removal_enable_flg =
5016 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM,
5017 &params->update_flags);
5018 data->inner_vlan_removal_change_flg =
5019 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
5020 &params->update_flags);
5022 /* Outer VLAN stripping */
5023 data->outer_vlan_removal_enable_flg =
5024 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM,
5025 &params->update_flags);
5026 data->outer_vlan_removal_change_flg =
5027 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
5028 &params->update_flags);
5030 /* Drop packets that have source MAC that doesn't belong to this
5033 data->anti_spoofing_enable_flg =
5034 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF,
5035 &params->update_flags);
5036 data->anti_spoofing_change_flg =
5037 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
5038 &params->update_flags);
5040 /* Activate/Deactivate */
5041 data->activate_flg =
5042 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
5043 data->activate_change_flg =
5044 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5045 &params->update_flags);
5047 /* Enable default VLAN */
5048 data->default_vlan_enable_flg =
5049 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN,
5050 &params->update_flags);
5051 data->default_vlan_change_flg =
5052 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
5053 &params->update_flags);
5055 /* silent vlan removal */
5056 data->silent_vlan_change_flg =
5057 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5058 &params->update_flags);
5059 data->silent_vlan_removal_flg =
5060 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
5061 &params->update_flags);
5062 data->silent_vlan_value = ECORE_CPU_TO_LE16(params->silent_removal_value);
5063 data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
5066 data->tx_switching_flg =
5067 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING,
5068 &params->update_flags);
5069 data->tx_switching_change_flg =
5070 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
5071 &params->update_flags);
5074 static inline int ecore_q_send_update(struct bxe_softc *sc,
5075 struct ecore_queue_state_params *params)
5077 struct ecore_queue_sp_obj *o = params->q_obj;
5078 struct client_update_ramrod_data *rdata =
5079 (struct client_update_ramrod_data *)o->rdata;
5080 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5081 struct ecore_queue_update_params *update_params =
5082 &params->params.update;
5083 uint8_t cid_index = update_params->cid_index;
5085 if (cid_index >= o->max_cos) {
5086 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5087 o->cl_id, cid_index);
5091 /* Clear the ramrod data */
5092 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5094 /* Fill the ramrod data */
5095 ecore_q_fill_update_data(sc, o, update_params, rdata);
5097 /* No need for an explicit memory barrier here as long as we would
5098 * need to ensure the ordering of writing to the SPQ element
5099 * and updating of the SPQ producer which involves a memory
5100 * read and we will have to put a full memory barrier there
5101 * (inside ecore_sp_post()).
5104 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5105 o->cids[cid_index], data_mapping,
5106 ETH_CONNECTION_TYPE);
5110 * ecore_q_send_deactivate - send DEACTIVATE command
5112 * @sc: device handle
5115 * implemented using the UPDATE command.
5117 static inline int ecore_q_send_deactivate(struct bxe_softc *sc,
5118 struct ecore_queue_state_params *params)
5120 struct ecore_queue_update_params *update = &params->params.update;
5122 ECORE_MEMSET(update, 0, sizeof(*update));
5124 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5126 return ecore_q_send_update(sc, params);
5130 * ecore_q_send_activate - send ACTIVATE command
5132 * @sc: device handle
5135 * implemented using the UPDATE command.
5137 static inline int ecore_q_send_activate(struct bxe_softc *sc,
5138 struct ecore_queue_state_params *params)
5140 struct ecore_queue_update_params *update = &params->params.update;
5142 ECORE_MEMSET(update, 0, sizeof(*update));
5144 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
5145 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5147 return ecore_q_send_update(sc, params);
5150 static inline int ecore_q_send_update_tpa(struct bxe_softc *sc,
5151 struct ecore_queue_state_params *params)
5153 /* TODO: Not implemented yet. */
5157 static inline int ecore_q_send_halt(struct bxe_softc *sc,
5158 struct ecore_queue_state_params *params)
5160 struct ecore_queue_sp_obj *o = params->q_obj;
5162 /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
5163 ecore_dma_addr_t data_mapping = 0;
5164 data_mapping = (ecore_dma_addr_t)o->cl_id;
5166 return ecore_sp_post(sc,
5167 RAMROD_CMD_ID_ETH_HALT,
5168 o->cids[ECORE_PRIMARY_CID_INDEX],
5170 ETH_CONNECTION_TYPE);
5173 static inline int ecore_q_send_cfc_del(struct bxe_softc *sc,
5174 struct ecore_queue_state_params *params)
5176 struct ecore_queue_sp_obj *o = params->q_obj;
5177 uint8_t cid_idx = params->params.cfc_del.cid_index;
5179 if (cid_idx >= o->max_cos) {
5180 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5185 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
5186 o->cids[cid_idx], 0,
5187 NONE_CONNECTION_TYPE);
5190 static inline int ecore_q_send_terminate(struct bxe_softc *sc,
5191 struct ecore_queue_state_params *params)
5193 struct ecore_queue_sp_obj *o = params->q_obj;
5194 uint8_t cid_index = params->params.terminate.cid_index;
5196 if (cid_index >= o->max_cos) {
5197 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5198 o->cl_id, cid_index);
5202 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
5203 o->cids[cid_index], 0,
5204 ETH_CONNECTION_TYPE);
5207 static inline int ecore_q_send_empty(struct bxe_softc *sc,
5208 struct ecore_queue_state_params *params)
5210 struct ecore_queue_sp_obj *o = params->q_obj;
5212 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
5213 o->cids[ECORE_PRIMARY_CID_INDEX], 0,
5214 ETH_CONNECTION_TYPE);
5217 static inline int ecore_queue_send_cmd_cmn(struct bxe_softc *sc,
5218 struct ecore_queue_state_params *params)
5220 switch (params->cmd) {
5221 case ECORE_Q_CMD_INIT:
5222 return ecore_q_init(sc, params);
5223 case ECORE_Q_CMD_SETUP_TX_ONLY:
5224 return ecore_q_send_setup_tx_only(sc, params);
5225 case ECORE_Q_CMD_DEACTIVATE:
5226 return ecore_q_send_deactivate(sc, params);
5227 case ECORE_Q_CMD_ACTIVATE:
5228 return ecore_q_send_activate(sc, params);
5229 case ECORE_Q_CMD_UPDATE:
5230 return ecore_q_send_update(sc, params);
5231 case ECORE_Q_CMD_UPDATE_TPA:
5232 return ecore_q_send_update_tpa(sc, params);
5233 case ECORE_Q_CMD_HALT:
5234 return ecore_q_send_halt(sc, params);
5235 case ECORE_Q_CMD_CFC_DEL:
5236 return ecore_q_send_cfc_del(sc, params);
5237 case ECORE_Q_CMD_TERMINATE:
5238 return ecore_q_send_terminate(sc, params);
5239 case ECORE_Q_CMD_EMPTY:
5240 return ecore_q_send_empty(sc, params);
5242 ECORE_ERR("Unknown command: %d\n", params->cmd);
5247 static int ecore_queue_send_cmd_e1x(struct bxe_softc *sc,
5248 struct ecore_queue_state_params *params)
5250 switch (params->cmd) {
5251 case ECORE_Q_CMD_SETUP:
5252 return ecore_q_send_setup_e1x(sc, params);
5253 case ECORE_Q_CMD_INIT:
5254 case ECORE_Q_CMD_SETUP_TX_ONLY:
5255 case ECORE_Q_CMD_DEACTIVATE:
5256 case ECORE_Q_CMD_ACTIVATE:
5257 case ECORE_Q_CMD_UPDATE:
5258 case ECORE_Q_CMD_UPDATE_TPA:
5259 case ECORE_Q_CMD_HALT:
5260 case ECORE_Q_CMD_CFC_DEL:
5261 case ECORE_Q_CMD_TERMINATE:
5262 case ECORE_Q_CMD_EMPTY:
5263 return ecore_queue_send_cmd_cmn(sc, params);
5265 ECORE_ERR("Unknown command: %d\n", params->cmd);
5270 static int ecore_queue_send_cmd_e2(struct bxe_softc *sc,
5271 struct ecore_queue_state_params *params)
5273 switch (params->cmd) {
5274 case ECORE_Q_CMD_SETUP:
5275 return ecore_q_send_setup_e2(sc, params);
5276 case ECORE_Q_CMD_INIT:
5277 case ECORE_Q_CMD_SETUP_TX_ONLY:
5278 case ECORE_Q_CMD_DEACTIVATE:
5279 case ECORE_Q_CMD_ACTIVATE:
5280 case ECORE_Q_CMD_UPDATE:
5281 case ECORE_Q_CMD_UPDATE_TPA:
5282 case ECORE_Q_CMD_HALT:
5283 case ECORE_Q_CMD_CFC_DEL:
5284 case ECORE_Q_CMD_TERMINATE:
5285 case ECORE_Q_CMD_EMPTY:
5286 return ecore_queue_send_cmd_cmn(sc, params);
5288 ECORE_ERR("Unknown command: %d\n", params->cmd);
5294 * ecore_queue_chk_transition - check state machine of a regular Queue
5296 * @sc: device handle
5301 * It both checks if the requested command is legal in the current
5302 * state and, if it's legal, sets a `next_state' in the object
5303 * that will be used in the completion flow to set the `state'
5306 * returns 0 if a requested command is a legal transition,
5307 * ECORE_INVAL otherwise.
5309 static int ecore_queue_chk_transition(struct bxe_softc *sc,
5310 struct ecore_queue_sp_obj *o,
5311 struct ecore_queue_state_params *params)
5313 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5314 enum ecore_queue_cmd cmd = params->cmd;
5315 struct ecore_queue_update_params *update_params =
5316 &params->params.update;
5317 uint8_t next_tx_only = o->num_tx_only;
5319 /* Forget all pending for completion commands if a driver only state
5320 * transition has been requested.
5322 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5324 o->next_state = ECORE_Q_STATE_MAX;
5327 /* Don't allow a next state transition if we are in the middle of
5331 ECORE_ERR("Blocking transition since pending was %lx\n",
5337 case ECORE_Q_STATE_RESET:
5338 if (cmd == ECORE_Q_CMD_INIT)
5339 next_state = ECORE_Q_STATE_INITIALIZED;
5342 case ECORE_Q_STATE_INITIALIZED:
5343 if (cmd == ECORE_Q_CMD_SETUP) {
5344 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5345 &params->params.setup.flags))
5346 next_state = ECORE_Q_STATE_ACTIVE;
5348 next_state = ECORE_Q_STATE_INACTIVE;
5352 case ECORE_Q_STATE_ACTIVE:
5353 if (cmd == ECORE_Q_CMD_DEACTIVATE)
5354 next_state = ECORE_Q_STATE_INACTIVE;
5356 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5357 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5358 next_state = ECORE_Q_STATE_ACTIVE;
5360 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5361 next_state = ECORE_Q_STATE_MULTI_COS;
5365 else if (cmd == ECORE_Q_CMD_HALT)
5366 next_state = ECORE_Q_STATE_STOPPED;
5368 else if (cmd == ECORE_Q_CMD_UPDATE) {
5369 /* If "active" state change is requested, update the
5370 * state accordingly.
5372 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5373 &update_params->update_flags) &&
5374 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5375 &update_params->update_flags))
5376 next_state = ECORE_Q_STATE_INACTIVE;
5378 next_state = ECORE_Q_STATE_ACTIVE;
5382 case ECORE_Q_STATE_MULTI_COS:
5383 if (cmd == ECORE_Q_CMD_TERMINATE)
5384 next_state = ECORE_Q_STATE_MCOS_TERMINATED;
5386 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5387 next_state = ECORE_Q_STATE_MULTI_COS;
5388 next_tx_only = o->num_tx_only + 1;
5391 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5392 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5393 next_state = ECORE_Q_STATE_MULTI_COS;
5395 else if (cmd == ECORE_Q_CMD_UPDATE) {
5396 /* If "active" state change is requested, update the
5397 * state accordingly.
5399 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5400 &update_params->update_flags) &&
5401 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5402 &update_params->update_flags))
5403 next_state = ECORE_Q_STATE_INACTIVE;
5405 next_state = ECORE_Q_STATE_MULTI_COS;
5409 case ECORE_Q_STATE_MCOS_TERMINATED:
5410 if (cmd == ECORE_Q_CMD_CFC_DEL) {
5411 next_tx_only = o->num_tx_only - 1;
5412 if (next_tx_only == 0)
5413 next_state = ECORE_Q_STATE_ACTIVE;
5415 next_state = ECORE_Q_STATE_MULTI_COS;
5419 case ECORE_Q_STATE_INACTIVE:
5420 if (cmd == ECORE_Q_CMD_ACTIVATE)
5421 next_state = ECORE_Q_STATE_ACTIVE;
5423 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5424 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5425 next_state = ECORE_Q_STATE_INACTIVE;
5427 else if (cmd == ECORE_Q_CMD_HALT)
5428 next_state = ECORE_Q_STATE_STOPPED;
5430 else if (cmd == ECORE_Q_CMD_UPDATE) {
5431 /* If "active" state change is requested, update the
5432 * state accordingly.
5434 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5435 &update_params->update_flags) &&
5436 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5437 &update_params->update_flags)){
5438 if (o->num_tx_only == 0)
5439 next_state = ECORE_Q_STATE_ACTIVE;
5440 else /* tx only queues exist for this queue */
5441 next_state = ECORE_Q_STATE_MULTI_COS;
5443 next_state = ECORE_Q_STATE_INACTIVE;
5447 case ECORE_Q_STATE_STOPPED:
5448 if (cmd == ECORE_Q_CMD_TERMINATE)
5449 next_state = ECORE_Q_STATE_TERMINATED;
5452 case ECORE_Q_STATE_TERMINATED:
5453 if (cmd == ECORE_Q_CMD_CFC_DEL)
5454 next_state = ECORE_Q_STATE_RESET;
5458 ECORE_ERR("Illegal state: %d\n", state);
5461 /* Transition is assured */
5462 if (next_state != ECORE_Q_STATE_MAX) {
5463 ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5464 state, cmd, next_state);
5465 o->next_state = next_state;
5466 o->next_tx_only = next_tx_only;
5467 return ECORE_SUCCESS;
5470 ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5476 * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
5478 * @sc: device handle
5482 * It both checks if the requested command is legal in the current
5483 * state and, if it's legal, sets a `next_state' in the object
5484 * that will be used in the completion flow to set the `state'
5487 * returns 0 if a requested command is a legal transition,
5488 * ECORE_INVAL otherwise.
5490 static int ecore_queue_chk_fwd_transition(struct bxe_softc *sc,
5491 struct ecore_queue_sp_obj *o,
5492 struct ecore_queue_state_params *params)
5494 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5495 enum ecore_queue_cmd cmd = params->cmd;
5498 case ECORE_Q_STATE_RESET:
5499 if (cmd == ECORE_Q_CMD_INIT)
5500 next_state = ECORE_Q_STATE_INITIALIZED;
5503 case ECORE_Q_STATE_INITIALIZED:
5504 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5505 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5506 &params->params.tx_only.flags))
5507 next_state = ECORE_Q_STATE_ACTIVE;
5509 next_state = ECORE_Q_STATE_INACTIVE;
5513 case ECORE_Q_STATE_ACTIVE:
5514 case ECORE_Q_STATE_INACTIVE:
5515 if (cmd == ECORE_Q_CMD_CFC_DEL)
5516 next_state = ECORE_Q_STATE_RESET;
5520 ECORE_ERR("Illegal state: %d\n", state);
5523 /* Transition is assured */
5524 if (next_state != ECORE_Q_STATE_MAX) {
5525 ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5526 state, cmd, next_state);
5527 o->next_state = next_state;
5528 return ECORE_SUCCESS;
5531 ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5535 void ecore_init_queue_obj(struct bxe_softc *sc,
5536 struct ecore_queue_sp_obj *obj,
5537 uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt, uint8_t func_id,
5539 ecore_dma_addr_t rdata_mapping, unsigned long type)
5541 ECORE_MEMSET(obj, 0, sizeof(*obj));
5543 /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
5544 ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
5546 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5547 obj->max_cos = cid_cnt;
5549 obj->func_id = func_id;
5551 obj->rdata_mapping = rdata_mapping;
5553 obj->next_state = ECORE_Q_STATE_MAX;
5555 if (CHIP_IS_E1x(sc))
5556 obj->send_cmd = ecore_queue_send_cmd_e1x;
5558 obj->send_cmd = ecore_queue_send_cmd_e2;
5560 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
5561 obj->check_transition = ecore_queue_chk_fwd_transition;
5563 obj->check_transition = ecore_queue_chk_transition;
5565 obj->complete_cmd = ecore_queue_comp_cmd;
5566 obj->wait_comp = ecore_queue_wait_comp;
5567 obj->set_pending = ecore_queue_set_pending;
5570 /* return a queue object's logical state */
5571 int ecore_get_q_logical_state(struct bxe_softc *sc,
5572 struct ecore_queue_sp_obj *obj)
5574 switch (obj->state) {
5575 case ECORE_Q_STATE_ACTIVE:
5576 case ECORE_Q_STATE_MULTI_COS:
5577 return ECORE_Q_LOGICAL_STATE_ACTIVE;
5578 case ECORE_Q_STATE_RESET:
5579 case ECORE_Q_STATE_INITIALIZED:
5580 case ECORE_Q_STATE_MCOS_TERMINATED:
5581 case ECORE_Q_STATE_INACTIVE:
5582 case ECORE_Q_STATE_STOPPED:
5583 case ECORE_Q_STATE_TERMINATED:
5584 case ECORE_Q_STATE_FLRED:
5585 return ECORE_Q_LOGICAL_STATE_STOPPED;
5591 /********************** Function state object *********************************/
5592 enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc,
5593 struct ecore_func_sp_obj *o)
5595 /* in the middle of transaction - return INVALID state */
5597 return ECORE_F_STATE_MAX;
5599 /* ensure the order of reading of o->pending and o->state:
5600 * o->pending should be read first
5607 static int ecore_func_wait_comp(struct bxe_softc *sc,
5608 struct ecore_func_sp_obj *o,
5609 enum ecore_func_cmd cmd)
5611 return ecore_state_wait(sc, cmd, &o->pending);
5615 * ecore_func_state_change_comp - complete the state machine transition
5617 * @sc: device handle
5621 * Called on state change transition. Completes the state
5622 * machine transition only - no HW interaction.
5624 static inline int ecore_func_state_change_comp(struct bxe_softc *sc,
5625 struct ecore_func_sp_obj *o,
5626 enum ecore_func_cmd cmd)
5628 unsigned long cur_pending = o->pending;
5630 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
5631 ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5632 cmd, ECORE_FUNC_ID(sc), o->state,
5633 cur_pending, o->next_state);
5638 "Completing command %d for func %d, setting state to %d\n",
5639 cmd, ECORE_FUNC_ID(sc), o->next_state);
5641 o->state = o->next_state;
5642 o->next_state = ECORE_F_STATE_MAX;
5644 /* It's important that o->state and o->next_state are
5645 * updated before o->pending.
5649 ECORE_CLEAR_BIT(cmd, &o->pending);
5650 ECORE_SMP_MB_AFTER_CLEAR_BIT();
5652 return ECORE_SUCCESS;
5656 * ecore_func_comp_cmd - complete the state change command
5658 * @sc: device handle
5662 * Checks that the arrived completion is expected.
5664 static int ecore_func_comp_cmd(struct bxe_softc *sc,
5665 struct ecore_func_sp_obj *o,
5666 enum ecore_func_cmd cmd)
5668 /* Complete the state machine part first, check if it's a
5671 int rc = ecore_func_state_change_comp(sc, o, cmd);
5676 * ecore_func_chk_transition - perform function state machine transition
5678 * @sc: device handle
5682 * It both checks if the requested command is legal in the current
5683 * state and, if it's legal, sets a `next_state' in the object
5684 * that will be used in the completion flow to set the `state'
5687 * returns 0 if a requested command is a legal transition,
5688 * ECORE_INVAL otherwise.
5690 static int ecore_func_chk_transition(struct bxe_softc *sc,
5691 struct ecore_func_sp_obj *o,
5692 struct ecore_func_state_params *params)
5694 enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
5695 enum ecore_func_cmd cmd = params->cmd;
5697 /* Forget all pending for completion commands if a driver only state
5698 * transition has been requested.
5700 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5702 o->next_state = ECORE_F_STATE_MAX;
5705 /* Don't allow a next state transition if we are in the middle of
5712 case ECORE_F_STATE_RESET:
5713 if (cmd == ECORE_F_CMD_HW_INIT)
5714 next_state = ECORE_F_STATE_INITIALIZED;
5717 case ECORE_F_STATE_INITIALIZED:
5718 if (cmd == ECORE_F_CMD_START)
5719 next_state = ECORE_F_STATE_STARTED;
5721 else if (cmd == ECORE_F_CMD_HW_RESET)
5722 next_state = ECORE_F_STATE_RESET;
5725 case ECORE_F_STATE_STARTED:
5726 if (cmd == ECORE_F_CMD_STOP)
5727 next_state = ECORE_F_STATE_INITIALIZED;
5728 /* afex ramrods can be sent only in started mode, and only
5729 * if not pending for function_stop ramrod completion
5730 * for these events - next state remains STARTED.
5732 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
5733 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5734 next_state = ECORE_F_STATE_STARTED;
5736 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
5737 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5738 next_state = ECORE_F_STATE_STARTED;
5740 /* Switch_update ramrod can be sent in either started or
5741 * tx_stopped state, and it doesn't change the state.
5743 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
5744 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5745 next_state = ECORE_F_STATE_STARTED;
5747 else if (cmd == ECORE_F_CMD_TX_STOP)
5748 next_state = ECORE_F_STATE_TX_STOPPED;
5751 case ECORE_F_STATE_TX_STOPPED:
5752 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
5753 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5754 next_state = ECORE_F_STATE_TX_STOPPED;
5756 else if (cmd == ECORE_F_CMD_TX_START)
5757 next_state = ECORE_F_STATE_STARTED;
5761 ECORE_ERR("Unknown state: %d\n", state);
5764 /* Transition is assured */
5765 if (next_state != ECORE_F_STATE_MAX) {
5766 ECORE_MSG(sc, "Good function state transition: %d(%d)->%d\n",
5767 state, cmd, next_state);
5768 o->next_state = next_state;
5769 return ECORE_SUCCESS;
5772 ECORE_MSG(sc, "Bad function state transition request: %d %d\n",
5779 * ecore_func_init_func - performs HW init at function stage
5781 * @sc: device handle
5784 * Init HW when the current phase is
5785 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5788 static inline int ecore_func_init_func(struct bxe_softc *sc,
5789 const struct ecore_func_sp_drv_ops *drv)
5791 return drv->init_hw_func(sc);
5795 * ecore_func_init_port - performs HW init at port stage
5797 * @sc: device handle
5800 * Init HW when the current phase is
5801 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5802 * FUNCTION-only HW blocks.
5805 static inline int ecore_func_init_port(struct bxe_softc *sc,
5806 const struct ecore_func_sp_drv_ops *drv)
5808 int rc = drv->init_hw_port(sc);
5812 return ecore_func_init_func(sc, drv);
5816 * ecore_func_init_cmn_chip - performs HW init at chip-common stage
5818 * @sc: device handle
5821 * Init HW when the current phase is
5822 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5823 * PORT-only and FUNCTION-only HW blocks.
5825 static inline int ecore_func_init_cmn_chip(struct bxe_softc *sc,
5826 const struct ecore_func_sp_drv_ops *drv)
5828 int rc = drv->init_hw_cmn_chip(sc);
5832 return ecore_func_init_port(sc, drv);
5836 * ecore_func_init_cmn - performs HW init at common stage
5838 * @sc: device handle
5841 * Init HW when the current phase is
5842 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5843 * PORT-only and FUNCTION-only HW blocks.
5845 static inline int ecore_func_init_cmn(struct bxe_softc *sc,
5846 const struct ecore_func_sp_drv_ops *drv)
5848 int rc = drv->init_hw_cmn(sc);
5852 return ecore_func_init_port(sc, drv);
5855 static int ecore_func_hw_init(struct bxe_softc *sc,
5856 struct ecore_func_state_params *params)
5858 uint32_t load_code = params->params.hw_init.load_phase;
5859 struct ecore_func_sp_obj *o = params->f_obj;
5860 const struct ecore_func_sp_drv_ops *drv = o->drv;
5863 ECORE_MSG(sc, "function %d load_code %x\n",
5864 ECORE_ABS_FUNC_ID(sc), load_code);
5866 /* Prepare buffers for unzipping the FW */
5867 rc = drv->gunzip_init(sc);
5872 rc = drv->init_fw(sc);
5874 ECORE_ERR("Error loading firmware\n");
5878 /* Handle the beginning of COMMON_XXX phases separately... */
5879 switch (load_code) {
5880 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5881 rc = ecore_func_init_cmn_chip(sc, drv);
5886 case FW_MSG_CODE_DRV_LOAD_COMMON:
5887 rc = ecore_func_init_cmn(sc, drv);
5892 case FW_MSG_CODE_DRV_LOAD_PORT:
5893 rc = ecore_func_init_port(sc, drv);
5898 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5899 rc = ecore_func_init_func(sc, drv);
5905 ECORE_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5910 drv->gunzip_end(sc);
5912 /* In case of success, complete the command immediately: no ramrods
5916 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
5922 * ecore_func_reset_func - reset HW at function stage
5924 * @sc: device handle
5927 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5928 * FUNCTION-only HW blocks.
5930 static inline void ecore_func_reset_func(struct bxe_softc *sc,
5931 const struct ecore_func_sp_drv_ops *drv)
5933 drv->reset_hw_func(sc);
5937 * ecore_func_reset_port - reset HW at port stage
5939 * @sc: device handle
5942 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5943 * FUNCTION-only and PORT-only HW blocks.
5947 * It's important to call reset_port before reset_func() as the last thing
5948 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5949 * makes any DMAE transactions impossible.
5951 static inline void ecore_func_reset_port(struct bxe_softc *sc,
5952 const struct ecore_func_sp_drv_ops *drv)
5954 drv->reset_hw_port(sc);
5955 ecore_func_reset_func(sc, drv);
5959 * ecore_func_reset_cmn - reset HW at common stage
5961 * @sc: device handle
5964 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5965 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5966 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5968 static inline void ecore_func_reset_cmn(struct bxe_softc *sc,
5969 const struct ecore_func_sp_drv_ops *drv)
5971 ecore_func_reset_port(sc, drv);
5972 drv->reset_hw_cmn(sc);
5975 static inline int ecore_func_hw_reset(struct bxe_softc *sc,
5976 struct ecore_func_state_params *params)
5978 uint32_t reset_phase = params->params.hw_reset.reset_phase;
5979 struct ecore_func_sp_obj *o = params->f_obj;
5980 const struct ecore_func_sp_drv_ops *drv = o->drv;
5982 ECORE_MSG(sc, "function %d reset_phase %x\n", ECORE_ABS_FUNC_ID(sc),
5985 switch (reset_phase) {
5986 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5987 ecore_func_reset_cmn(sc, drv);
5989 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5990 ecore_func_reset_port(sc, drv);
5992 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5993 ecore_func_reset_func(sc, drv);
5996 ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n",
6001 /* Complete the command immediately: no ramrods have been sent. */
6002 o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
6004 return ECORE_SUCCESS;
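/*
 * A condensed view (sketch, not driver code) of the usual function
 * bring-up/tear-down order implied by ecore_func_chk_transition() above;
 * the array name is hypothetical.
 */
static const enum ecore_func_cmd sketch_func_lifecycle[] = {
	ECORE_F_CMD_HW_INIT,	/* RESET       -> INITIALIZED */
	ECORE_F_CMD_START,	/* INITIALIZED -> STARTED */
	ECORE_F_CMD_STOP,	/* STARTED     -> INITIALIZED */
	ECORE_F_CMD_HW_RESET,	/* INITIALIZED -> RESET */
};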
6007 static inline int ecore_func_send_start(struct bxe_softc *sc,
6008 struct ecore_func_state_params *params)
6010 struct ecore_func_sp_obj *o = params->f_obj;
6011 struct function_start_data *rdata =
6012 (struct function_start_data *)o->rdata;
6013 ecore_dma_addr_t data_mapping = o->rdata_mapping;
6014 struct ecore_func_start_params *start_params = &params->params.start;
6016 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6018 /* Fill the ramrod data with provided parameters */
6019 rdata->function_mode = (uint8_t)start_params->mf_mode;
6020 rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
6021 rdata->path_id = ECORE_PATH_ID(sc);
6022 rdata->network_cos_mode = start_params->network_cos_mode;
6023 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
6024 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
6027 * No need for an explicit memory barrier here as long as we would
6028 * need to ensure the ordering of writing to the SPQ element
6029 * and updating of the SPQ producer which involves a memory
6030 * read and we will have to put a full memory barrier there
6031 * (inside ecore_sp_post()).
6034 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
6035 data_mapping, NONE_CONNECTION_TYPE);
6038 static inline int ecore_func_send_switch_update(struct bxe_softc *sc,
6039 struct ecore_func_state_params *params)
6041 struct ecore_func_sp_obj *o = params->f_obj;
6042 struct function_update_data *rdata =
6043 (struct function_update_data *)o->rdata;
6044 ecore_dma_addr_t data_mapping = o->rdata_mapping;
6045 struct ecore_func_switch_update_params *switch_update_params =
6046 &params->params.switch_update;
6048 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6050 /* Fill the ramrod data with provided parameters */
6051 rdata->tx_switch_suspend_change_flg = 1;
6052 rdata->tx_switch_suspend = switch_update_params->suspend;
6053 rdata->echo = SWITCH_UPDATE;
6055 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6056 data_mapping, NONE_CONNECTION_TYPE);
6059 static inline int ecore_func_send_afex_update(struct bxe_softc *sc,
6060 struct ecore_func_state_params *params)
6062 struct ecore_func_sp_obj *o = params->f_obj;
6063 struct function_update_data *rdata =
6064 (struct function_update_data *)o->afex_rdata;
6065 ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
6066 struct ecore_func_afex_update_params *afex_update_params =
6067 &params->params.afex_update;
6069 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6071 /* Fill the ramrod data with provided parameters */
6072 rdata->vif_id_change_flg = 1;
6073 rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
6074 rdata->afex_default_vlan_change_flg = 1;
6075 rdata->afex_default_vlan =
6076 ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
6077 rdata->allowed_priorities_change_flg = 1;
6078 rdata->allowed_priorities = afex_update_params->allowed_priorities;
6079 rdata->echo = AFEX_UPDATE;
6081 /* No need for an explicit memory barrier here as long as we would
6082 * need to ensure the ordering of writing to the SPQ element
6083 * and updating of the SPQ producer which involves a memory
6084 * read and we will have to put a full memory barrier there
6085 * (inside ecore_sp_post()).
6088 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
6090 rdata->afex_default_vlan, rdata->allowed_priorities);
6092 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6093 data_mapping, NONE_CONNECTION_TYPE);
6097 inline int ecore_func_send_afex_viflists(struct bxe_softc *sc,
6098 struct ecore_func_state_params *params)
6100 struct ecore_func_sp_obj *o = params->f_obj;
6101 struct afex_vif_list_ramrod_data *rdata =
6102 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
6103 struct ecore_func_afex_viflists_params *afex_vif_params =
6104 &params->params.afex_viflists;
6105 uint64_t *p_rdata = (uint64_t *)rdata;
6107 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6109 /* Fill the ramrod data with provided parameters */
6110 rdata->vif_list_index = ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
6111 rdata->func_bit_map = afex_vif_params->func_bit_map;
6112 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
6113 rdata->func_to_clear = afex_vif_params->func_to_clear;
6115 /* send in echo type of sub command */
6116 rdata->echo = afex_vif_params->afex_vif_list_command;
6118 /* No need for an explicit memory barrier here as long as we would
6119 * need to ensure the ordering of writing to the SPQ element
6120 * and updating of the SPQ producer which involves a memory
6121 * read and we will have to put a full memory barrier there
6122 * (inside ecore_sp_post()).
6125 ECORE_MSG(sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
6126 rdata->afex_vif_list_command, rdata->vif_list_index,
6127 rdata->func_bit_map, rdata->func_to_clear);
6129 /* this ramrod sends data directly and not through DMA mapping */
6130 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
6131 *p_rdata, NONE_CONNECTION_TYPE);
6134 static inline int ecore_func_send_stop(struct bxe_softc *sc,
6135 struct ecore_func_state_params *params)
6137 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
6138 NONE_CONNECTION_TYPE);
6141 static inline int ecore_func_send_tx_stop(struct bxe_softc *sc,
6142 struct ecore_func_state_params *params)
6144 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
6145 NONE_CONNECTION_TYPE);
6147 static inline int ecore_func_send_tx_start(struct bxe_softc *sc,
6148 struct ecore_func_state_params *params)
6150 struct ecore_func_sp_obj *o = params->f_obj;
6151 struct flow_control_configuration *rdata =
6152 (struct flow_control_configuration *)o->rdata;
6153 ecore_dma_addr_t data_mapping = o->rdata_mapping;
6154 struct ecore_func_tx_start_params *tx_start_params =
6155 &params->params.tx_start;
6158 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6160 rdata->dcb_enabled = tx_start_params->dcb_enabled;
6161 rdata->dcb_version = tx_start_params->dcb_version;
6162 rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
6164 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6165 rdata->traffic_type_to_priority_cos[i] =
6166 tx_start_params->traffic_type_to_priority_cos[i];
6168 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6169 data_mapping, NONE_CONNECTION_TYPE);
6172 static int ecore_func_send_cmd(struct bxe_softc *sc,
6173 struct ecore_func_state_params *params)
6175 switch (params->cmd) {
6176 case ECORE_F_CMD_HW_INIT:
6177 return ecore_func_hw_init(sc, params);
6178 case ECORE_F_CMD_START:
6179 return ecore_func_send_start(sc, params);
6180 case ECORE_F_CMD_STOP:
6181 return ecore_func_send_stop(sc, params);
6182 case ECORE_F_CMD_HW_RESET:
6183 return ecore_func_hw_reset(sc, params);
6184 case ECORE_F_CMD_AFEX_UPDATE:
6185 return ecore_func_send_afex_update(sc, params);
6186 case ECORE_F_CMD_AFEX_VIFLISTS:
6187 return ecore_func_send_afex_viflists(sc, params);
6188 case ECORE_F_CMD_TX_STOP:
6189 return ecore_func_send_tx_stop(sc, params);
6190 case ECORE_F_CMD_TX_START:
6191 return ecore_func_send_tx_start(sc, params);
6192 case ECORE_F_CMD_SWITCH_UPDATE:
6193 return ecore_func_send_switch_update(sc, params);
6195 ECORE_ERR("Unknown command: %d\n", params->cmd);
6200 void ecore_init_func_obj(struct bxe_softc *sc,
6201 struct ecore_func_sp_obj *obj,
6202 void *rdata, ecore_dma_addr_t rdata_mapping,
6203 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
6204 struct ecore_func_sp_drv_ops *drv_iface)
6206 ECORE_MEMSET(obj, 0, sizeof(*obj));
6208 ECORE_MUTEX_INIT(&obj->one_pending_mutex);
6211 obj->rdata_mapping = rdata_mapping;
6212 obj->afex_rdata = afex_rdata;
6213 obj->afex_rdata_mapping = afex_rdata_mapping;
6214 obj->send_cmd = ecore_func_send_cmd;
6215 obj->check_transition = ecore_func_chk_transition;
6216 obj->complete_cmd = ecore_func_comp_cmd;
6217 obj->wait_comp = ecore_func_wait_comp;
6218 obj->drv = drv_iface;
6222 * ecore_func_state_change - perform Function state change transition
6224 * @sc: device handle
6225 * @params: parameters to perform the transition
6227 * returns 0 in case of successfully completed transition,
6228 * negative error code in case of failure, positive
6229 * (EBUSY) value if there is a completion that is
6230 * still pending (possible only if RAMROD_COMP_WAIT is
6231 * not set in params->ramrod_flags for asynchronous
6234 int ecore_func_state_change(struct bxe_softc *sc,
6235 struct ecore_func_state_params *params)
6237 struct ecore_func_sp_obj *o = params->f_obj;
6239 enum ecore_func_cmd cmd = params->cmd;
6240 unsigned long *pending = &o->pending;
6242 ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6244 /* Check that the requested transition is legal */
6245 rc = o->check_transition(sc, o, params);
6246 if ((rc == ECORE_BUSY) &&
6247 (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
6248 while ((rc == ECORE_BUSY) && (--cnt > 0)) {
6249 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6251 ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6252 rc = o->check_transition(sc, o, params);
6254 if (rc == ECORE_BUSY) {
6255 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6256 ECORE_ERR("timeout waiting for previous ramrod completion\n");
6260 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6264 /* Set "pending" bit */
6265 ECORE_SET_BIT(cmd, pending);
6267 /* Don't send a command if only driver cleanup was requested */
6268 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6269 ecore_func_state_change_comp(sc, o, cmd);
6270 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6273 rc = o->send_cmd(sc, params);
6275 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6278 o->next_state = ECORE_F_STATE_MAX;
6279 ECORE_CLEAR_BIT(cmd, pending);
6280 ECORE_SMP_MB_AFTER_CLEAR_BIT();
6284 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6285 rc = o->wait_comp(sc, o, cmd);
6289 return ECORE_SUCCESS;
6293 return ECORE_RET_PENDING(cmd, pending);
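/*
 * A hedged usage sketch (not driver code): a synchronous FUNCTION_START
 * through ecore_func_state_change() above.  The sketch_* name is
 * hypothetical; only parameters visible in ecore_func_send_start() are
 * filled in, and "sc"/"f_obj" are assumed to be initialized by the caller.
 */
static int
sketch_func_start(struct bxe_softc *sc, struct ecore_func_sp_obj *f_obj,
		  uint16_t mf_mode, uint16_t sd_vlan_tag)
{
	struct ecore_func_state_params params;

	ECORE_MEMSET(&params, 0, sizeof(params));
	params.f_obj = f_obj;
	params.cmd = ECORE_F_CMD_START;

	params.params.start.mf_mode = mf_mode;
	params.params.start.sd_vlan_tag = sd_vlan_tag;

	/* Retry if a previous ramrod is still pending, then wait for ours */
	ECORE_SET_BIT(RAMROD_RETRY, &params.ramrod_flags);
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return (ecore_func_state_change(sc, &params));
}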