/***********************license start***************
 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.

 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.

 * This Software, including technical data, may be subject to U.S. export  control
 * laws, including the U.S. Export Administration Act and its  associated
 * regulations, and may be subject to export or import  regulations in other
 * countries.

 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/

/**
 * @file
 *
 * Interface to the hardware Input Packet Data unit.
 *
 * <hr>$Revision: 49448 $<hr>
 */

#ifndef __CVMX_IPD_H__
#define __CVMX_IPD_H__

#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#else
# ifndef CVMX_DONT_INCLUDE_CONFIG
#  include "executive-config.h"
#   ifdef CVMX_ENABLE_PKO_FUNCTIONS
#    include "cvmx-config.h"
#   endif
# endif
#endif

#ifdef  __cplusplus
extern "C" {
#endif

#ifndef CVMX_ENABLE_LEN_M8_FIX
#define CVMX_ENABLE_LEN_M8_FIX 0
#endif

/* CSR typedefs have been moved to cvmx-ipd-defs.h */

typedef cvmx_ipd_1st_mbuff_skip_t cvmx_ipd_mbuff_not_first_skip_t;
typedef cvmx_ipd_1st_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;

/**
 * Configure IPD
 *
 * @param mbuff_size Packet buffer size in 8 byte words
 * @param first_mbuff_skip
 *                   Number of 8 byte words to skip in the first buffer
 * @param not_first_mbuff_skip
 *                   Number of 8 byte words to skip in each following buffer
 * @param first_back Must be same as first_mbuff_skip / 128
 * @param second_back
 *                   Must be same as not_first_mbuff_skip / 128
 * @param wqe_fpa_pool
 *                   FPA pool to get work entries from
 * @param cache_mode
 *                   Cache mode used by IPD when writing packet data
 *                   (see cvmx_ipd_mode_t)
 * @param back_pres_enable_flag
 *                   Enable or disable port back pressure at a global level.
 *                   This should always be 1 as more accurate control can be
 *                   found in IPD_PORTX_BP_PAGE_CNT[BP_ENB].
 */
static inline void cvmx_ipd_config(uint64_t mbuff_size,
                                   uint64_t first_mbuff_skip,
                                   uint64_t not_first_mbuff_skip,
                                   uint64_t first_back,
                                   uint64_t second_back,
                                   uint64_t wqe_fpa_pool,
                                   cvmx_ipd_mode_t cache_mode,
                                   uint64_t back_pres_enable_flag)
{
    cvmx_ipd_1st_mbuff_skip_t first_skip;
    cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
    cvmx_ipd_packet_mbuff_size_t size;
    cvmx_ipd_1st_next_ptr_back_t first_back_struct;
    cvmx_ipd_second_next_ptr_back_t second_back_struct;
    cvmx_ipd_wqe_fpa_queue_t wqe_pool;
    cvmx_ipd_ctl_status_t ipd_ctl_reg;

    first_skip.u64 = 0;
    first_skip.s.skip_sz = first_mbuff_skip;
    cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);

    not_first_skip.u64 = 0;
    not_first_skip.s.skip_sz = not_first_mbuff_skip;
    cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);

    size.u64 = 0;
    size.s.mb_size = mbuff_size;
    cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);

    first_back_struct.u64 = 0;
    first_back_struct.s.back = first_back;
    cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);

    second_back_struct.u64 = 0;
    second_back_struct.s.back = second_back;
    cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);

    wqe_pool.u64 = 0;
    wqe_pool.s.wqe_pool = wqe_fpa_pool;
    cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);

    ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
    ipd_ctl_reg.s.opc_mode = cache_mode;
    ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
    cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);

    /* Note: the example RED code that used to be here has been moved to
       cvmx_helper_setup_red */
}
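
/*
 * Illustrative usage only (not part of the original SDK header): a minimal
 * sketch of how cvmx_ipd_config() is typically invoked during simple
 * executive initialization.  The CVMX_HELPER_* and CVMX_FPA_* macros are
 * assumed to be provided by executive-config.h / cvmx-config.h, and the
 * exact values depend on the application's configuration.
 *
 *   cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE / 8,
 *                   CVMX_HELPER_FIRST_MBUFF_SKIP / 8,
 *                   CVMX_HELPER_NOT_FIRST_MBUFF_SKIP / 8,
 *                   (CVMX_HELPER_FIRST_MBUFF_SKIP + 8) / 128,
 *                   (CVMX_HELPER_NOT_FIRST_MBUFF_SKIP + 8) / 128,
 *                   CVMX_FPA_WQE_POOL,
 *                   CVMX_IPD_OPC_MODE_STT,
 *                   CVMX_HELPER_ENABLE_BACK_PRESSURE);
 */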

/**
 * Enable IPD
 */
static inline void cvmx_ipd_enable(void)
{
    cvmx_ipd_ctl_status_t ipd_reg;
    ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
    if (ipd_reg.s.ipd_en)
    {
        cvmx_dprintf("Warning: Enabling IPD when IPD already enabled.\n");
    }
    ipd_reg.s.ipd_en = 1;
#if CVMX_ENABLE_LEN_M8_FIX
    if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
        ipd_reg.s.len_m8 = 1;
    }
#endif
    cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
}
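
/*
 * Bring-up ordering note (illustrative, not from the original header):
 * cvmx_ipd_enable() is normally the last step of input-path setup, after
 * the FPA pools are filled, cvmx_ipd_config() has been called, and PIP/port
 * configuration is complete.  A rough sketch, assuming FPA and port setup
 * are handled elsewhere:
 *
 *   cvmx_ipd_config(...);   // program skip sizes, buffer size, WQE pool
 *   // ... configure PIP ports, PKO, and fill FPA pools ...
 *   cvmx_ipd_enable();      // start accepting packets into IPD
 */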

/**
 * Disable IPD
 */
static inline void cvmx_ipd_disable(void)
{
    cvmx_ipd_ctl_status_t ipd_reg;
    ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
    ipd_reg.s.ipd_en = 0;
    cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
}

#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/**
 * @INTERNAL
 * This function is called by cvmx_helper_shutdown() to extract
 * all FPA buffers out of the IPD and PIP. After this function
 * completes, all FPA buffers that were prefetched by IPD and PIP
 * will be in the appropriate FPA pool. This function does not reset
 * PIP or IPD as FPA pool zero must be empty before the reset can
 * be performed. WARNING: It is very important that IPD and PIP be
 * reset soon after a call to this function.
 */
static inline void __cvmx_ipd_free_ptr(void)
{
    /* Only CN38XXp{1,2} cannot read pointers out of the IPD */
    if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
        int no_wptr = 0;
        cvmx_ipd_ptr_count_t ipd_ptr_count;
        ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);

        /* Handle Work Queue Entry in cn56xx and cn52xx */
        if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
            cvmx_ipd_ctl_status_t ipd_ctl_status;
            ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
            if (ipd_ctl_status.s.no_wptr)
                no_wptr = 1;
        }

        /* Free the prefetched WQE */
        if (ipd_ptr_count.s.wqev_cnt) {
            cvmx_ipd_wqe_ptr_valid_t ipd_wqe_ptr_valid;
            ipd_wqe_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
            if (no_wptr)
                cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_wqe_ptr_valid.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
            else
                cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_wqe_ptr_valid.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
        }

        /* Free all WQE in the fifo */
        if (ipd_ptr_count.s.wqe_pcnt) {
            int i;
            cvmx_ipd_pwp_ptr_fifo_ctl_t ipd_pwp_ptr_fifo_ctl;
            ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
            for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
                ipd_pwp_ptr_fifo_ctl.s.cena = 0;
                ipd_pwp_ptr_fifo_ctl.s.raddr = ipd_pwp_ptr_fifo_ctl.s.max_cnts + (ipd_pwp_ptr_fifo_ctl.s.wraddr+i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
                cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
                ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
                if (no_wptr)
                    cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
                else
                    cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_WQE_POOL, 0);
            }
            ipd_pwp_ptr_fifo_ctl.s.cena = 1;
            cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
        }

        /* Free the prefetched packet */
        if (ipd_ptr_count.s.pktv_cnt) {
            cvmx_ipd_pkt_ptr_valid_t ipd_pkt_ptr_valid;
            ipd_pkt_ptr_valid.u64 = cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
            cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pkt_ptr_valid.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
        }

        /* Free the per port prefetched packets */
        if (1) {
            int i;
            cvmx_ipd_prc_port_ptr_fifo_ctl_t ipd_prc_port_ptr_fifo_ctl;
            ipd_prc_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);

            for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt; i++) {
                ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
                ipd_prc_port_ptr_fifo_ctl.s.raddr = i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
                cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, ipd_prc_port_ptr_fifo_ctl.u64);
                ipd_prc_port_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
                cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_prc_port_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
            }
            ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
            cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, ipd_prc_port_ptr_fifo_ctl.u64);
        }

        /* Free all packets in the holding fifo */
        if (ipd_ptr_count.s.pfif_cnt) {
            int i;
            cvmx_ipd_prc_hold_ptr_fifo_ctl_t ipd_prc_hold_ptr_fifo_ctl;

            ipd_prc_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);

            for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
                ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
                ipd_prc_hold_ptr_fifo_ctl.s.raddr = (ipd_prc_hold_ptr_fifo_ctl.s.praddr + i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
                cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, ipd_prc_hold_ptr_fifo_ctl.u64);
                ipd_prc_hold_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
                cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_prc_hold_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
            }
            ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
            cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, ipd_prc_hold_ptr_fifo_ctl.u64);
        }

        /* Free all packets in the fifo */
        if (ipd_ptr_count.s.pkt_pcnt) {
            int i;
            cvmx_ipd_pwp_ptr_fifo_ctl_t ipd_pwp_ptr_fifo_ctl;
            ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);

            for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
                ipd_pwp_ptr_fifo_ctl.s.cena = 0;
                ipd_pwp_ptr_fifo_ctl.s.raddr = (ipd_pwp_ptr_fifo_ctl.s.praddr+i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
                cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
                ipd_pwp_ptr_fifo_ctl.u64 = cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
                cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)ipd_pwp_ptr_fifo_ctl.s.ptr<<7), CVMX_FPA_PACKET_POOL, 0);
            }
            ipd_pwp_ptr_fifo_ctl.s.cena = 1;
            cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, ipd_pwp_ptr_fifo_ctl.u64);
        }
    }
}
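
/*
 * Teardown sketch (illustrative, not from the original header): the shutdown
 * order this function expects.  In the SDK this sequencing is handled by
 * cvmx_helper_shutdown(); the reset step below is only a placeholder.
 *
 *   cvmx_ipd_disable();      // stop new packets from entering IPD
 *   __cvmx_ipd_free_ptr();   // return prefetched buffers/WQEs to their pools
 *   // ... then reset IPD and PIP soon afterwards, per the warning above ...
 */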
#endif

#ifdef  __cplusplus
}
#endif

#endif  /* __CVMX_IPD_H__ */