1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
/*
 * Completion queue control register layout (fragment).
 * NOTE(review): the enclosing "struct vnic_cq_ctrl {" opener is not
 * visible in this chunk.  Each field's trailing comment gives its byte
 * offset within the register block, and the CQ_* #define that follows
 * repeats the same offset for register-access helpers — the field/define
 * pairs must be kept in sync.  Some offsets (0x20, 0x28) have no visible
 * #define here; presumably dropped by the chunker — confirm in full source.
 */
12 /* Completion queue control */
14 u64 ring_base; /* 0x00 */
15 #define CQ_RING_BASE 0x00
16 u32 ring_size; /* 0x08 */
17 #define CQ_RING_SIZE 0x08
19 u32 flow_control_enable; /* 0x10 */
20 #define CQ_FLOW_CONTROL_ENABLE 0x10
22 u32 color_enable; /* 0x18 */
23 #define CQ_COLOR_ENABLE 0x18
25 u32 cq_head; /* 0x20 */
28 u32 cq_tail; /* 0x28 */
31 u32 cq_tail_color; /* 0x30 */
32 #define CQ_TAIL_COLOR 0x30
34 u32 interrupt_enable; /* 0x38 */
35 #define CQ_INTR_ENABLE 0x38
37 u32 cq_entry_enable; /* 0x40 */
38 #define CQ_ENTRY_ENABLE 0x40
40 u32 cq_message_enable; /* 0x48 */
41 #define CQ_MESSAGE_ENABLE 0x48
43 u32 interrupt_offset; /* 0x50 */
44 #define CQ_INTR_OFFSET 0x50
46 u64 cq_message_addr; /* 0x58 */
47 #define CQ_MESSAGE_ADDR 0x58
/*
 * Received-byte accumulators, split into small- and large-packet totals.
 * NOTE(review): embedded in struct vnic_cq as pkt_size_counter below;
 * presumably feeds the rx interrupt-coalescing logic (cur/tobe
 * rx_coal_timeval) — confirm against the .c implementation.
 */
52 struct vnic_rx_bytes_counter {
53 unsigned int small_pkt_bytes_cnt;
54 unsigned int large_pkt_bytes_cnt;
/*
 * Per-completion-queue software state (fragment — the "struct vnic_cq {"
 * opener and any leading members are not visible in this chunk).
 */
60 struct vnic_dev *vdev; /* owning device; handed to the q_service callback */
61 struct vnic_res *ctrl; /* NOTE(review): presumably maps the vnic_cq_ctrl register block — confirm */
62 struct vnic_dev_ring ring; /* descriptor ring; descs/desc_size/desc_count drive the service loops */
63 unsigned int to_clean; /* index of the next descriptor to examine */
64 unsigned int last_color; /* color of already-consumed entries; a differing color marks new work */
65 unsigned int interrupt_offset; /* programmed via vnic_cq_init() */
67 struct vnic_rx_bytes_counter pkt_size_counter; /* rx byte accounting by packet size */
68 unsigned int cur_rx_coal_timeval; /* current rx coalescing interval — units not visible here */
69 unsigned int tobe_rx_coal_timeval; /* pending rx coalescing interval to apply */
/* Teardown counterpart to CQ allocation (implementation not in view). */
78 void vnic_cq_free(struct vnic_cq *cq);
/*
 * Program the CQ: the parameters correspond one-to-one to the register
 * fields of struct vnic_cq_ctrl above (flow control, color, head/tail,
 * interrupt and message configuration).
 */
79 void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
80 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
81 unsigned int cq_tail_color, unsigned int interrupt_enable,
82 unsigned int cq_entry_enable, unsigned int message_enable,
83 unsigned int interrupt_offset, u64 message_addr);
/* Reset the CQ's state (implementation not in view). */
84 void vnic_cq_clean(struct vnic_cq *cq);
/* Presumably returns the memory footprint of a ring of @desc_count
 * descriptors of @desc_size bytes each — confirm in the .c file. */
85 int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count,
86 unsigned int desc_size);
/*
 * vnic_cq_service() - drain completed descriptors from the CQ.
 *
 * Walks the ring starting at cq->to_clean, decoding each descriptor with
 * cq_desc_dec() and handing it to the caller-supplied @q_service callback.
 * The loop runs while the decoded color bit differs from cq->last_color
 * (a differing color means hardware has newly published the entry) and
 * stops early when @q_service returns non-zero or the @work_to_do budget
 * is reached.  cq->last_color is flipped each time the ring wraps.
 *
 * NOTE(review): several interior lines are not visible in this chunk
 * (the @opaque parameter line, the type/color declarations, the
 * increment/wrap and return statements); presumably it returns
 * work_done — confirm against the full source.
 */
88 static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
89 unsigned int work_to_do,
90 int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
91 u8 type, u16 q_number, u16 completed_index, void *opaque),
94 struct cq_desc *cq_desc;
95 unsigned int work_done = 0;
96 u16 q_number, completed_index;
/* Decode the descriptor at the current clean index. */
99 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
100 cq->ring.desc_size * cq->to_clean);
101 cq_desc_dec(cq_desc, &type, &color,
102 &q_number, &completed_index);
/* Color mismatch => this descriptor holds fresh work from hardware. */
104 while (color != cq->last_color) {
/* A non-zero return from the callback aborts the drain. */
105 if ((*q_service)(cq->vdev, cq_desc, type,
106 q_number, completed_index, opaque))
/* Ring wrapped: flip the color sense expected from hardware
 * (the to_clean reset is not visible in this chunk). */
110 if (cq->to_clean == cq->ring.desc_count) {
112 cq->last_color = cq->last_color ? 0 : 1;
/* Advance to and decode the next descriptor. */
115 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
116 cq->ring.desc_size * cq->to_clean);
117 cq_desc_dec(cq_desc, &type, &color,
118 &q_number, &completed_index);
/* Budget exhausted. */
121 if (work_done >= work_to_do)
/*
 * vnic_cq_work() - count pending completions without consuming them.
 *
 * Same ring walk as vnic_cq_service(), but it operates on LOCAL copies of
 * to_clean and last_color (taken below) rather than cq's own fields, so
 * the CQ state is left untouched — a peek, not a drain.  Counting stops
 * at the first stale-color descriptor or at the @work_to_do cap.
 *
 * NOTE(review): interior lines are missing from this chunk (type/color
 * declarations, the increment statements, the return); presumably it
 * returns work_avail — confirm against the full source.
 */
128 static inline unsigned int vnic_cq_work(struct vnic_cq *cq,
129 unsigned int work_to_do)
131 struct cq_desc *cq_desc;
132 unsigned int work_avail = 0;
133 u16 q_number, completed_index;
135 u32 to_clean, last_color;
/* Snapshot the CQ cursor so cq itself is not modified. */
137 to_clean = cq->to_clean;
138 last_color = cq->last_color;
/* Decode the descriptor at the snapshot position. */
139 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
140 cq->ring.desc_size * to_clean);
141 cq_desc_dec(cq_desc, &type, &color,
142 &q_number, &completed_index);
/* Color mismatch => entry published by hardware since last service. */
144 while (color != last_color) {
/* Local wrap handling: flip the local color sense only. */
146 if (to_clean == cq->ring.desc_count) {
148 last_color = last_color ? 0 : 1;
/* Advance to and decode the next descriptor. */
151 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
152 cq->ring.desc_size * to_clean);
153 cq_desc_dec(cq_desc, &type, &color,
154 &q_number, &completed_index);
/* Cap reached. */
157 if (work_avail >= work_to_do)
164 #endif /* _VNIC_CQ_H_ */