/*
 * Copyright (C) 2015 Cavium Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include "q_struct.h"

#define MAX_QUEUE_SET			128
#define MAX_RCV_QUEUES_PER_QS		8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define MAX_SND_QUEUES_PER_QS		8
#define MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ	0
#define NICVF_INTR_ID_SQ	8
#define NICVF_INTR_ID_RBDR	16
#define NICVF_INTR_ID_MISC	18
#define NICVF_INTR_ID_QS_ERR	19

#define for_each_cq_irq(irq)	\
	for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define for_each_sq_irq(irq)	\
	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define for_each_rbdr_irq(irq)	\
	for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
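
/* Illustrative sketch (not part of the driver): the iterators above walk
 * the contiguous per-VF interrupt ID ranges, e.g. all eight CQ vectors.
 * enable_one_cq_vector() is a hypothetical helper:
 *
 *	int irq;
 *
 *	for_each_cq_irq(irq)
 *		enable_one_cq_vector(nic, irq - NICVF_INTR_ID_CQ);
 */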

#define RBDR_SIZE0		0ULL /* 8K entries */
#define RBDR_SIZE1		1ULL /* 16K entries */
#define RBDR_SIZE2		2ULL /* 32K entries */
#define RBDR_SIZE3		3ULL /* 64K entries */
#define RBDR_SIZE4		4ULL /* 128K entries */
#define RBDR_SIZE5		5ULL /* 256K entries */
#define RBDR_SIZE6		6ULL /* 512K entries */

#define SND_QUEUE_SIZE0		0ULL /* 1K entries */
#define SND_QUEUE_SIZE1		1ULL /* 2K entries */
#define SND_QUEUE_SIZE2		2ULL /* 4K entries */
#define SND_QUEUE_SIZE3		3ULL /* 8K entries */
#define SND_QUEUE_SIZE4		4ULL /* 16K entries */
#define SND_QUEUE_SIZE5		5ULL /* 32K entries */
#define SND_QUEUE_SIZE6		6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0		0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1		1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2		2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3		3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4		4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5		5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6		6ULL /* 64K entries */
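
/* Note: each size field encodes a power-of-two entry count:
 * SQ/CQ entries = 1024 << SIZEn and RBDR entries = 8192 << SIZEn.
 * E.g. SND_QUEUE_SIZE2 gives 1024 << 2 = 4096 entries, which is exactly
 * SND_QUEUE_LEN = 1ULL << (SND_QSIZE + 10) with the defaults below.
 */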

/* Default queue count per QS, queue lengths and threshold values */
#define RCV_QUEUE_CNT		8
#define SND_QUEUE_CNT		8
#define CMP_QUEUE_CNT		8 /* Max of RCV and SND qcount */

#define SND_QSIZE		SND_QUEUE_SIZE2
#define SND_QUEUE_LEN		(1ULL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH	2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT	2
/* 1 since timestamping is not enabled; would be 2 otherwise */
#define MAX_CQE_PER_PKT_XMIT		1

/* Keep CQ and SQ sizes the same; if timestamping
 * is enabled this equation will change.
 */
#define CMP_QSIZE		CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH	0
#define CMP_QUEUE_TIMER_THRESH	220 /* 10usec */

#define RBDR_SIZE		RBDR_SIZE0
#define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH		(RCV_BUF_COUNT / 2)
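
/* Worked example with the defaults above: RBDR_SIZE0 yields
 * RCV_BUF_COUNT = 1ULL << 13 = 8192 receive buffers per ring, so the
 * refill threshold RBDR_THRESH is reached after 4096 buffers are used.
 */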

#define DMA_BUFFER_LEN		2048 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN	(SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
			 (NICVF_RCV_BUF_ALIGN_BYTES * 2))
#define RCV_DATA_OFFSET		NICVF_RCV_BUF_ALIGN_BYTES
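
/* Rough sizing sketch; the exact value is config-dependent since
 * NET_SKB_PAD and sizeof(struct skb_shared_info) vary by arch/kernel.
 * Assuming NET_SKB_PAD = 64, a 320-byte skb_shared_info and 64-byte
 * cachelines: RCV_FRAG_LEN = 2112 + 320 + 256 = 2688 bytes per fragment.
 */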

#define MAX_CQES_FOR_TX	((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
			 MAX_CQE_PER_PKT_XMIT)
/* Calculate number of CQEs to reserve for all SQEs.
 * It's 1/256th level of CQ size.
 * '+ 1' to account for pipelining.
 */
#define RQ_CQ_DROP	((256 / (CMP_QUEUE_LEN / \
			 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
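
/* With the defaults above, SND_QUEUE_LEN = CMP_QUEUE_LEN = 4096, so
 * MAX_CQES_FOR_TX = (4096 / 2) * 1 = 2048 and
 * RQ_CQ_DROP = (256 / (4096 / (4096 - 2048))) + 1 = 128 + 1 = 129.
 */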

/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE	16
#define CMP_QUEUE_DESC_SIZE	512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN		7
#define NICVF_RCV_BUF_ALIGN_BYTES	(1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES	512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES	128 /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)	ALIGN(ADDR, ALIGN_BYTES)
#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
	(NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
#define NICVF_RCV_BUF_ALIGN_LEN(X)\
	(NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)
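
/* Example: NICVF_RCV_BUF_ALIGN_BYTES = 1 << 7 = 128, so
 * NICVF_ALIGNED_ADDR(0x1004, 128) = 0x1080 and
 * NICVF_RCV_BUF_ALIGN_LEN(0x1004) = 0x1080 - 0x1004 = 124, i.e. the
 * padding needed to reach the next 128-byte boundary.
 */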

/* Queue enable/disable */
#define NICVF_SQ_EN		BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET		BIT_ULL(41)
#define NICVF_SQ_RESET		BIT_ULL(17)
#define NICVF_RBDR_RESET	BIT_ULL(43)

enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

struct cmp_queue_stats {
	struct tx_stats {
		u64 good;
		u64 desc_fault;
		u64 hdr_cons_err;
		u64 subdesc_err;
		u64 imm_size_oflow;
		u64 data_seq_err;
		u64 mem_seq_err;
		u64 lock_viol;
		u64 data_fault;
		u64 tstmp_conflict;
		u64 tstmp_timeout;
		u64 mem_fault;
		u64 csum_overlap;
		u64 csum_overflow;
	} tx;
} ____cacheline_aligned_in_smp;

struct rx_tx_queue_stats {
	u64	bytes;
	u64	pkts;
} ____cacheline_aligned_in_smp;

struct q_desc_mem {
	dma_addr_t	dma;
	u64		size;
	u16		q_len;
	dma_addr_t	phys_base;
	void		*base;
	void		*unalign_base;
};

struct rbdr {
	bool		enable;
	u32		dma_size;
	u32		frag_len;
	u32		thresh;	/* Threshold level for interrupt */
	void		*desc;
	u32		head;
	u32		tail;
	struct q_desc_mem	dmem;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
	bool		enable;
	struct rbdr	*rbdr_start;
	struct rbdr	*rbdr_cont;
	bool		en_tcp_reassembly;
	u8		cq_qs;  /* CQ's QS to which this RQ is assigned */
	u8		cq_idx; /* CQ index (0 to 7) in the QS */
	u8		cont_rbdr_qs;      /* Continue buffer ptrs - QS num */
	u8		cont_qs_rbdr_idx;  /* RBDR idx in the cont QS */
	u8		start_rbdr_qs;     /* First buffer ptrs - QS num */
	u8		start_qs_rbdr_idx; /* RBDR idx in the above QS */
	u8		caching;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct cmp_queue {
	bool		enable;
	u16		thresh;
	spinlock_t	lock;  /* lock to serialize processing CQEs */
	void		*desc;
	struct q_desc_mem	dmem;
	struct cmp_queue_stats	stats;
	int		irq;
} ____cacheline_aligned_in_smp;

struct snd_queue {
	bool		enable;
	u8		cq_qs;  /* CQ's QS to which this SQ is pointing */
	u8		cq_idx; /* CQ index (0 to 7) in the above QS */
	u16		thresh;
	atomic_t	free_cnt;
	u32		head;
	u32		tail;
	u64		*skbuff;
	void		*desc;
	struct q_desc_mem	dmem;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct queue_set {
	bool		enable;
	bool		be_en;
	u8		vnic_id;
	u8		rq_cnt;
	u8		cq_cnt;
	u64		cq_len;
	u8		sq_cnt;
	u64		sq_len;
	u8		rbdr_cnt;
	u64		rbdr_len;
	struct rcv_queue	rq[MAX_RCV_QUEUES_PER_QS];
	struct cmp_queue	cq[MAX_CMP_QUEUES_PER_QS];
	struct snd_queue	sq[MAX_SND_QUEUES_PER_QS];
	struct rbdr		rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;

#define GET_RBDR_DESC(RING, idx)\
		(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
		(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
		(&(((union cq_desc_t *)((RING)->desc))[idx]))
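
/* Illustrative use (a sketch, not driver code): refilling one RBDR slot.
 * buf_dma_addr is a hypothetical, freshly mapped DMA address; the
 * rbdr_entry_t layout comes from q_struct.h:
 *
 *	struct rbdr_entry_t *desc = GET_RBDR_DESC(rbdr, rbdr->tail);
 *
 *	desc->buf_addr = buf_dma_addr >> NICVF_RCV_BUF_ALIGN;
 */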

/* CQ status bits */
#define CQ_WR_FULL	BIT(26)
#define CQ_WR_DISABLE	BIT(25)
#define CQ_WR_FAULT	BIT(24)
#define CQ_CQE_COUNT	(0xFFFF << 0)

#define CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
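
/* Sketch of how these bits might be consumed when polling a CQ; the
 * register name NIC_QSET_CQ_0_7_STATUS is defined in nic_reg.h:
 *
 *	u64 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, qidx);
 *
 *	if (status & CQ_ERR_MASK)
 *		return;		(CQ is full, disabled or faulted)
 *	cqe_count = status & CQ_CQE_COUNT;
 */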

void nicvf_config_vlan_stripping(struct nicvf *nic,
				 netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
			 u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
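
/* Sketch of a CQE dispatch step a NAPI poller might perform (the field
 * and enum names come from q_struct.h; the loop body is illustrative):
 *
 *	struct cqe_rx_t *cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
 *
 *	switch (cq_desc->cqe_type) {
 *	case CQE_TYPE_RX:
 *		if (!nicvf_check_cqe_rx_errs(nic, cq, cq_desc))
 *			skb = nicvf_get_rcv_skb(nic, cq_desc);
 *		break;
 *	case CQE_TYPE_SEND:
 *		nicvf_check_cqe_tx_errs(nic, cq, (void *)cq_desc);
 *		break;
 *	}
 */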

#endif /* NICVF_QUEUES_H */