/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP2_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP2_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP2_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))
/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MIB_COUNTERS_BASE(port)		(0x1000 + ((port) >> 1) * \
						 0x400 + (port) * 0x400)
#define MVPP2_MIB_LATE_COLLISION		0x7c
#define MVPP2_ISR_SUM_MASK_REG			0x220c
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
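/* For example, with a 256-descriptor ring (last_desc == 255),
 * MVPP2_QUEUE_NEXT_DESC(q, 254) yields 255 and MVPP2_QUEUE_NEXT_DESC(q, 255)
 * yields 0: the index simply wraps around the ring.
 */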
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff
/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ			8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4

/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM		(MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
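/* Worked example: for the standard 1500-byte MTU,
 * MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32) = 1536,
 * i.e. MTU plus Marvell header, VLAN tag, Ethernet header and FCS,
 * rounded up to a whole number of 32-byte cache lines.
 */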
#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)
/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS		8
#define MVPP2_PRS_PORT_MASK		0xff
#define MVPP2_PRS_LU_MASK		0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)	\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
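/* The TCAM data is stored as pairs of data bytes followed by their enable
 * (mask) bytes: logical offsets 0, 1, 2, 3 map to data bytes 0, 1, 4, 5 and
 * to enable bytes 2, 3, 6, 7 respectively, i.e. each 32-bit TCAM word holds
 * two data bytes plus the two bytes that enable them.
 */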
#define MVPP2_PRS_TCAM_AI_BYTE		16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE		20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD		5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
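/* Note the allocation scheme implied by the IDs above: the default entries
 * occupy the top of the 256-entry TCAM (MVPP2_PE_IP6_EXT_PROTO_UN up to
 * MVPP2_PE_MAC_NON_PROMISCUOUS), while dynamically created entries are
 * taken from MVPP2_PE_FIRST_FREE_TID..MVPP2_PE_LAST_FREE_TID below them.
 */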
/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			0xc
#define MVPP2_PRS_RI_VLAN_NONE			~(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		0x600
#define MVPP2_PRS_RI_L2_UCAST			~(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		0x7000
#define MVPP2_PRS_RI_L3_UN			~(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		0x18000
#define MVPP2_PRS_RI_L3_UCAST			~(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		8
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL		3
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24
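/* A BM cookie thus packs the pool number starting at bit 8 and the CPU
 * number starting at bit 24 of a single 32-bit word, so the RX path can
 * recover both from the cookie alone.
 */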
/* BM short pool packet size
 * This value assures that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
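/* MVPP2_RX_MAX_PKT_SIZE(512) works the buffer math backwards from a
 * 512-byte total budget: NET_SKB_PAD and the skb_shared_info size are
 * subtracted, so packet data, headroom and shared info together fit in
 * 512 bytes per short buffer.
 */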
enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};
/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_port {
	u8 id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors; their
 * layout is therefore defined by the hardware design.
 */
#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)
struct mvpp2_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting */
	u8  packet_offset;	/* the offset from the buffer beginning */
	u8  phys_txq;		/* destination queue ID */
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* physical addr of transmitted buffer */
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use) */
};
struct mvpp2_rx_desc {
	u32 status;		/* info about received packet */
	u16 reserved1;		/* parser_info (for future use, PnC) */
	u16 data_size;		/* size of received packet in bytes */
	u32 buf_phys_addr;	/* physical address of the buffer */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON) */
	u16 reserved3;		/* csum_l4 (for future use, PnC) */
	u8  reserved4;		/* bm_qset (for future use, BM) */
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC) */
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Array of transmitted buffers' physical addresses */
	dma_addr_t *tx_buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};
struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};
struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE physical base address */
	dma_addr_t phys_addr;

	/* Ports using BM pool */
	u32 port_map;

	/* Occupied buffers indicator */
	atomic_t in_use;
	int in_use_thresh;
};
struct mvpp2_buff_hdr {
	u32 next_buff_phys_addr;
	u32 next_buff_virt_addr;
	u16 byte_count;
	u16 info;
	u8  reserved1;		/* bm_qset (for future use, BM) */
};
/* Buffer header info bits */
#define MVPP2_B_HDR_INFO_MC_ID_MASK	0xfff
#define MVPP2_B_HDR_INFO_MC_ID(info)	((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
#define MVPP2_B_HDR_INFO_LAST_OFFS	12
#define MVPP2_B_HDR_INFO_LAST_MASK	BIT(12)
#define MVPP2_B_HDR_INFO_IS_LAST(info) \
	((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/* Utility/helper methods */
static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}
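/* Access to the shared PPv2 register space goes through the two accessors
 * above; the parser TCAM/SRAM additionally latches an entry index into
 * MVPP2_PRS_TCAM_IDX_REG/MVPP2_PRS_SRAM_IDX_REG before touching the
 * corresponding data registers (see mvpp2_prs_hw_write/read below).
 */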
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}
static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
	if (skb)
		txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
						       tx_desc->buf_phys_addr;
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
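/* txq_put_index is advanced by the TX path as it records each skb and its
 * DMA address; txq_get_index chases it from the TX-done path, so the two
 * helpers above walk the same circular buffer of txq_pcpu->size entries.
 */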
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
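/* Physical egress ports 0..(MVPP2_MAX_TCONT - 1) belong to the PON
 * T-CONTs, which is why both helpers above offset the Ethernet port
 * number by MVPP2_MAX_TCONT.
 */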
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
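/* The port enable byte uses inverted polarity: a cleared bit means the
 * port participates in the lookup. Hence the '~ports' when writing the
 * map above and the '~' when reading it back.
 */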
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}
/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}
/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
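/* MVPP2_PRS_SRAM_AI_OFFS (90) is not byte aligned: the eight AI bits start
 * at bit 2 of sram byte 11, so the value above is reassembled from the top
 * six bits of byte 11 and the low two bits of byte 12.
 */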
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
		mvpp2_prs_sram_shift_set(&pe, shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Search for existing single/triple vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
						   unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;

		mvpp2_prs_hw_read(priv, pe);
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux) {
			ret = -EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		mvpp2_prs_match_etype(pe, 0, tpid);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Shift 4 bytes - skip 1 vlan tag */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);

	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);

	return ret;
}
/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}
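/* ai 0 is deliberately never handed out here: it is
 * MVPP2_PRS_SINGLE_VLAN_AI, reserved for single vlan entries, so double
 * vlan ai values start at 1.
 */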
/* Search for existing double vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
							  unsigned short tpid1,
							  unsigned short tpid2)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);

		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
			&& mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);
	return ret;
}
1922 /* IPv4 header parsing for fragmentation and L4 offset */
1923 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
1924 unsigned int ri, unsigned int ri_mask)
1926 struct mvpp2_prs_entry pe;
1929 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1930 (proto != IPPROTO_IGMP))
1933 /* Fragmented packet */
1934 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1935 MVPP2_PE_LAST_FREE_TID);
1939 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1940 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1943 /* Set next lu to IPv4 */
1944 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1945 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1947 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1948 sizeof(struct iphdr) - 4,
1949 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1950 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1951 MVPP2_PRS_IPV4_DIP_AI_BIT);
1952 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
1953 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
1955 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1956 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1957 /* Unmask all ports */
1958 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1960 /* Update shadow table and hw entry */
1961 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1962 mvpp2_prs_hw_write(priv, &pe);
    /* Non-fragmented packet */
1965 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1966 MVPP2_PE_LAST_FREE_TID);
1971 /* Clear ri before updating */
1972 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1973 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1974 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1976 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
1977 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
1979 /* Update shadow table and hw entry */
1980 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1981 mvpp2_prs_hw_write(priv, &pe);
1986 /* IPv4 L3 multicast or broadcast */
1987 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
1989 struct mvpp2_prs_entry pe;
1992 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1993 MVPP2_PE_LAST_FREE_TID);
1997 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
    mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
    pe.index = tid;

    switch (l3_cast) {
    case MVPP2_PRS_L3_MULTI_CAST:
        mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
                         MVPP2_PRS_IPV4_MC_MASK);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
                     MVPP2_PRS_RI_L3_ADDR_MASK);
        break;
    case MVPP2_PRS_L3_BROAD_CAST:
        mask = MVPP2_PRS_IPV4_BC_MASK;
        mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
        mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
        mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
        mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
                     MVPP2_PRS_RI_L3_ADDR_MASK);
        break;
    }
2021 /* Finished: go to flowid generation */
2022 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2023 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2025 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2026 MVPP2_PRS_IPV4_DIP_AI_BIT);
2027 /* Unmask all ports */
2028 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2030 /* Update shadow table and hw entry */
2031 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2032 mvpp2_prs_hw_write(priv, &pe);
2037 /* Set entries for protocols over IPv6 */
2038 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2039 unsigned int ri, unsigned int ri_mask)
2041 struct mvpp2_prs_entry pe;
    if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
        (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
        return -EINVAL;
2048 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2049 MVPP2_PE_LAST_FREE_TID);
2053 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2054 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2057 /* Finished: go to flowid generation */
2058 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2059 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2060 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2061 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2062 sizeof(struct ipv6hdr) - 6,
2063 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2065 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2066 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2067 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2068 /* Unmask all ports */
2069 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2072 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2073 mvpp2_prs_hw_write(priv, &pe);
2078 /* IPv6 L3 multicast entry */
2079 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2081 struct mvpp2_prs_entry pe;
    if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
        return -EINVAL;
2087 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2088 MVPP2_PE_LAST_FREE_TID);
2092 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2093 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2096 /* Finished: go to flowid generation */
2097 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2098 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2099 MVPP2_PRS_RI_L3_ADDR_MASK);
2100 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2101 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2102 /* Shift back to IPv6 NH */
2103 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2105 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2106 MVPP2_PRS_IPV6_MC_MASK);
2107 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2108 /* Unmask all ports */
2109 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2111 /* Update shadow table and hw entry */
2112 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2113 mvpp2_prs_hw_write(priv, &pe);
2118 /* Parser per-port initialization */
2119 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2120 int lu_max, int offset)
2125 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2126 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2127 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2128 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2130 /* Set maximum number of loops for packet received from port */
2131 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2132 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2133 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2134 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
    /* Set initial offset for packet header extraction for the first
     * searching loop
     */
2139 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2140 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2141 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
    mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
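/* Usage sketch (illustration only; it mirrors mvpp2_prs_default_init()
 * further below): every port starts parsing at the Marvell header lookup
 * with a zero initial offset:
 *
 *	mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 *			       MVPP2_PRS_PORT_LU_MAX, 0);
 */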
2145 /* Default flow entries initialization for all ports */
2146 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2148 struct mvpp2_prs_entry pe;
2151 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2152 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2153 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2154 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2156 /* Mask all ports */
2157 mvpp2_prs_tcam_port_map_set(&pe, 0);
2160 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2161 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2163 /* Update shadow table and hw entry */
2164 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2165 mvpp2_prs_hw_write(priv, &pe);
2169 /* Set default entry for Marvell Header field */
2170 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2172 struct mvpp2_prs_entry pe;
2174 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2176 pe.index = MVPP2_PE_MH_DEFAULT;
2177 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2178 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2179 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2180 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2182 /* Unmask all ports */
2183 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2185 /* Update shadow table and hw entry */
2186 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2187 mvpp2_prs_hw_write(priv, &pe);
/* Set default entries (placeholder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
2193 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2195 struct mvpp2_prs_entry pe;
2197 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2199 /* Non-promiscuous mode for all ports - DROP unknown packets */
2200 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2201 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2203 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2204 MVPP2_PRS_RI_DROP_MASK);
2205 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2206 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2208 /* Unmask all ports */
2209 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2211 /* Update shadow table and hw entry */
2212 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2213 mvpp2_prs_hw_write(priv, &pe);
2215 /* place holders only - no ports */
2216 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2217 mvpp2_prs_mac_promisc_set(priv, 0, false);
2218 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2219 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2222 /* Set default entries for various types of dsa packets */
2223 static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2225 struct mvpp2_prs_entry pe;
2227 /* None tagged EDSA entry - place holder */
2228 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2231 /* Tagged EDSA entry - place holder */
2232 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2234 /* None tagged DSA entry - place holder */
2235 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2238 /* Tagged DSA entry - place holder */
2239 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
    /* None tagged EDSA ethertype entry - place holder */
2242 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2243 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
    /* Tagged EDSA ethertype entry - place holder */
2246 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2247 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2249 /* None tagged DSA ethertype entry */
2250 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2251 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2253 /* Tagged DSA ethertype entry */
2254 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2255 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
    /* Set default entry, in case no DSA or EDSA tag is found */
2258 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2259 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2260 pe.index = MVPP2_PE_DSA_DEFAULT;
2261 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2264 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2267 /* Clear all sram ai bits for next iteration */
2268 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2270 /* Unmask all ports */
2271 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2273 mvpp2_prs_hw_write(priv, &pe);
2276 /* Match basic ethertypes */
2277 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2279 struct mvpp2_prs_entry pe;
2282 /* Ethertype: PPPoE */
2283 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2284 MVPP2_PE_LAST_FREE_TID);
2288 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2289 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2292 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2294 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2295 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2296 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2297 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2298 MVPP2_PRS_RI_PPPOE_MASK);
2300 /* Update shadow table and hw entry */
2301 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2302 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2303 priv->prs_shadow[pe.index].finish = false;
2304 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2305 MVPP2_PRS_RI_PPPOE_MASK);
2306 mvpp2_prs_hw_write(priv, &pe);
2308 /* Ethertype: ARP */
2309 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2310 MVPP2_PE_LAST_FREE_TID);
2314 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2315 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2318 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
    /* Generate flow in the next iteration */
2321 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2322 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2323 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2324 MVPP2_PRS_RI_L3_PROTO_MASK);
2326 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2328 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2330 /* Update shadow table and hw entry */
2331 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2332 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2333 priv->prs_shadow[pe.index].finish = true;
2334 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2335 MVPP2_PRS_RI_L3_PROTO_MASK);
2336 mvpp2_prs_hw_write(priv, &pe);
2338 /* Ethertype: LBTD */
2339 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2340 MVPP2_PE_LAST_FREE_TID);
2344 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2345 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2348 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
    /* Generate flow in the next iteration */
2351 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2352 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2353 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2354 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2355 MVPP2_PRS_RI_CPU_CODE_MASK |
2356 MVPP2_PRS_RI_UDF3_MASK);
2358 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2360 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2362 /* Update shadow table and hw entry */
2363 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2364 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2365 priv->prs_shadow[pe.index].finish = true;
2366 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2367 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2368 MVPP2_PRS_RI_CPU_CODE_MASK |
2369 MVPP2_PRS_RI_UDF3_MASK);
2370 mvpp2_prs_hw_write(priv, &pe);
2372 /* Ethertype: IPv4 without options */
2373 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2374 MVPP2_PE_LAST_FREE_TID);
2378 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2379 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2382 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2383 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2384 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2385 MVPP2_PRS_IPV4_HEAD_MASK |
2386 MVPP2_PRS_IPV4_IHL_MASK);
2388 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2389 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2390 MVPP2_PRS_RI_L3_PROTO_MASK);
2391 /* Skip eth_type + 4 bytes of IP header */
2392 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2393 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2395 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2397 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2399 /* Update shadow table and hw entry */
2400 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2401 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2402 priv->prs_shadow[pe.index].finish = false;
2403 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2404 MVPP2_PRS_RI_L3_PROTO_MASK);
2405 mvpp2_prs_hw_write(priv, &pe);
2407 /* Ethertype: IPv4 with options */
2408 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2409 MVPP2_PE_LAST_FREE_TID);
2415 /* Clear tcam data before updating */
2416 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2417 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2419 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2420 MVPP2_PRS_IPV4_HEAD,
2421 MVPP2_PRS_IPV4_HEAD_MASK);
2423 /* Clear ri before updating */
2424 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2425 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2426 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2427 MVPP2_PRS_RI_L3_PROTO_MASK);
2429 /* Update shadow table and hw entry */
2430 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2431 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2432 priv->prs_shadow[pe.index].finish = false;
2433 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2434 MVPP2_PRS_RI_L3_PROTO_MASK);
2435 mvpp2_prs_hw_write(priv, &pe);
2437 /* Ethertype: IPv6 without options */
2438 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2439 MVPP2_PE_LAST_FREE_TID);
2443 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2444 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2447 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2449 /* Skip DIP of IPV6 header */
2450 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2451 MVPP2_MAX_L3_ADDR_SIZE,
2452 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2453 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2454 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2455 MVPP2_PRS_RI_L3_PROTO_MASK);
2457 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2459 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2461 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2462 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2463 priv->prs_shadow[pe.index].finish = false;
2464 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2465 MVPP2_PRS_RI_L3_PROTO_MASK);
2466 mvpp2_prs_hw_write(priv, &pe);
2468 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2469 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2470 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2471 pe.index = MVPP2_PE_ETH_TYPE_UN;
2473 /* Unmask all ports */
2474 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
    /* Generate flow in the next iteration */
2477 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2478 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2479 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2480 MVPP2_PRS_RI_L3_PROTO_MASK);
    /* Set L3 offset even if it's an unknown L3 */
2482 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2484 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2486 /* Update shadow table and hw entry */
2487 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2488 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2489 priv->prs_shadow[pe.index].finish = true;
2490 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2491 MVPP2_PRS_RI_L3_PROTO_MASK);
2492 mvpp2_prs_hw_write(priv, &pe);
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible combinations:
 *	0x8100, 0x88A8
 *	0x8100, 0x8100
 *	0x8100
 *	0x88A8
 */
2504 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2506 struct mvpp2_prs_entry pe;
    priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
                          MVPP2_PRS_DBL_VLANS_MAX,
                          sizeof(bool), GFP_KERNEL);
    if (!priv->prs_double_vlans)
        return -ENOMEM;
    /* Double VLAN: 0x8100, 0x88A8 */
    err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
                    MVPP2_PRS_PORT_MASK);
    if (err)
        return err;

    /* Double VLAN: 0x8100, 0x8100 */
    err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
                    MVPP2_PRS_PORT_MASK);
    if (err)
        return err;

    /* Single VLAN: 0x88a8 */
    err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
                 MVPP2_PRS_PORT_MASK);
    if (err)
        return err;

    /* Single VLAN: 0x8100 */
    err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
                 MVPP2_PRS_PORT_MASK);
    if (err)
        return err;
2539 /* Set default double vlan entry */
2540 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2541 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2542 pe.index = MVPP2_PE_VLAN_DBL;
2544 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2545 /* Clear ai for next iterations */
2546 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2547 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2548 MVPP2_PRS_RI_VLAN_MASK);
2550 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2551 MVPP2_PRS_DBL_VLAN_AI_BIT);
2552 /* Unmask all ports */
2553 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2555 /* Update shadow table and hw entry */
2556 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2557 mvpp2_prs_hw_write(priv, &pe);
2559 /* Set default vlan none entry */
2560 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2561 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2562 pe.index = MVPP2_PE_VLAN_NONE;
2564 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2565 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2566 MVPP2_PRS_RI_VLAN_MASK);
2568 /* Unmask all ports */
2569 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2571 /* Update shadow table and hw entry */
2572 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
    mvpp2_prs_hw_write(priv, &pe);

    return 0;
}
2578 /* Set entries for PPPoE ethertype */
2579 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2581 struct mvpp2_prs_entry pe;
2584 /* IPv4 over PPPoE with options */
2585 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2586 MVPP2_PE_LAST_FREE_TID);
2590 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2591 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2594 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2596 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2597 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2598 MVPP2_PRS_RI_L3_PROTO_MASK);
2599 /* Skip eth_type + 4 bytes of IP header */
2600 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2601 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2603 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2605 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2607 /* Update shadow table and hw entry */
2608 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2609 mvpp2_prs_hw_write(priv, &pe);
2611 /* IPv4 over PPPoE without options */
2612 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2613 MVPP2_PE_LAST_FREE_TID);
2619 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2620 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2621 MVPP2_PRS_IPV4_HEAD_MASK |
2622 MVPP2_PRS_IPV4_IHL_MASK);
2624 /* Clear ri before updating */
2625 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2626 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2627 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2628 MVPP2_PRS_RI_L3_PROTO_MASK);
2630 /* Update shadow table and hw entry */
2631 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2632 mvpp2_prs_hw_write(priv, &pe);
2634 /* IPv6 over PPPoE */
2635 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2636 MVPP2_PE_LAST_FREE_TID);
2640 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2641 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2644 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2646 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2647 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2648 MVPP2_PRS_RI_L3_PROTO_MASK);
2649 /* Skip eth_type + 4 bytes of IPv6 header */
2650 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2651 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2653 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2655 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2657 /* Update shadow table and hw entry */
2658 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2659 mvpp2_prs_hw_write(priv, &pe);
2661 /* Non-IP over PPPoE */
2662 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2663 MVPP2_PE_LAST_FREE_TID);
2667 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2668 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2671 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2672 MVPP2_PRS_RI_L3_PROTO_MASK);
2674 /* Finished: go to flowid generation */
2675 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2676 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
    /* Set L3 offset even if it's an unknown L3 */
2678 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2680 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2682 /* Update shadow table and hw entry */
2683 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2684 mvpp2_prs_hw_write(priv, &pe);
2689 /* Initialize entries for IPv4 */
2690 static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2692 struct mvpp2_prs_entry pe;
2695 /* Set entries for TCP, UDP and IGMP over IPv4 */
    err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
                  MVPP2_PRS_RI_L4_PROTO_MASK);
    if (err)
        return err;

    err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
                  MVPP2_PRS_RI_L4_PROTO_MASK);
    if (err)
        return err;

    err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
                  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
                  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
                  MVPP2_PRS_RI_CPU_CODE_MASK |
                  MVPP2_PRS_RI_UDF3_MASK);
    if (err)
        return err;

    /* IPv4 Broadcast */
    err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
    if (err)
        return err;

    /* IPv4 Multicast */
    err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
    if (err)
        return err;
2724 /* Default IPv4 entry for unknown protocols */
2725 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2726 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2727 pe.index = MVPP2_PE_IP4_PROTO_UN;
2729 /* Set next lu to IPv4 */
2730 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2731 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2733 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2734 sizeof(struct iphdr) - 4,
2735 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2736 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2737 MVPP2_PRS_IPV4_DIP_AI_BIT);
2738 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2739 MVPP2_PRS_RI_L4_PROTO_MASK);
2741 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2742 /* Unmask all ports */
2743 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2745 /* Update shadow table and hw entry */
2746 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2747 mvpp2_prs_hw_write(priv, &pe);
2749 /* Default IPv4 entry for unicast address */
2750 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2751 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2752 pe.index = MVPP2_PE_IP4_ADDR_UN;
2754 /* Finished: go to flowid generation */
2755 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2756 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2757 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2758 MVPP2_PRS_RI_L3_ADDR_MASK);
2760 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2761 MVPP2_PRS_IPV4_DIP_AI_BIT);
2762 /* Unmask all ports */
2763 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2765 /* Update shadow table and hw entry */
2766 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2767 mvpp2_prs_hw_write(priv, &pe);
2772 /* Initialize entries for IPv6 */
2773 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
2775 struct mvpp2_prs_entry pe;
2778 /* Set entries for TCP, UDP and ICMP over IPv6 */
    err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
                  MVPP2_PRS_RI_L4_TCP,
                  MVPP2_PRS_RI_L4_PROTO_MASK);
    if (err)
        return err;

    err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
                  MVPP2_PRS_RI_L4_UDP,
                  MVPP2_PRS_RI_L4_PROTO_MASK);
    if (err)
        return err;

    err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
                  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
                  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
                  MVPP2_PRS_RI_CPU_CODE_MASK |
                  MVPP2_PRS_RI_UDF3_MASK);
    if (err)
        return err;

    /* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
    /* Result Info: UDF7=1, DS lite */
    err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
                  MVPP2_PRS_RI_UDF7_IP6_LITE,
                  MVPP2_PRS_RI_UDF7_MASK);
    if (err)
        return err;

    /* IPv6 multicast */
    err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
    if (err)
        return err;
2812 /* Entry for checking hop limit */
2813 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2814 MVPP2_PE_LAST_FREE_TID);
2818 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2819 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2822 /* Finished: go to flowid generation */
2823 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2824 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2825 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
2826 MVPP2_PRS_RI_DROP_MASK,
2827 MVPP2_PRS_RI_L3_PROTO_MASK |
2828 MVPP2_PRS_RI_DROP_MASK);
2830 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
2831 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2832 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2834 /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2836 mvpp2_prs_hw_write(priv, &pe);
2838 /* Default IPv6 entry for unknown protocols */
2839 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2840 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2841 pe.index = MVPP2_PE_IP6_PROTO_UN;
2843 /* Finished: go to flowid generation */
2844 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2845 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2846 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2847 MVPP2_PRS_RI_L4_PROTO_MASK);
    /* Set L4 offset relative to our current place */
2849 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2850 sizeof(struct ipv6hdr) - 4,
2851 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2853 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2854 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2855 /* Unmask all ports */
2856 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2858 /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2860 mvpp2_prs_hw_write(priv, &pe);
2862 /* Default IPv6 entry for unknown ext protocols */
2863 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2864 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2865 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
2867 /* Finished: go to flowid generation */
2868 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2869 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2870 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2871 MVPP2_PRS_RI_L4_PROTO_MASK);
2873 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
2874 MVPP2_PRS_IPV6_EXT_AI_BIT);
2875 /* Unmask all ports */
2876 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2878 /* Update shadow table and hw entry */
    mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2880 mvpp2_prs_hw_write(priv, &pe);
2882 /* Default IPv6 entry for unicast address */
2883 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2884 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2885 pe.index = MVPP2_PE_IP6_ADDR_UN;
2887 /* Finished: go to IPv6 again */
2888 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2889 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2890 MVPP2_PRS_RI_L3_ADDR_MASK);
2891 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2892 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2893 /* Shift back to IPV6 NH */
2894 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2896 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2897 /* Unmask all ports */
2898 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2900 /* Update shadow table and hw entry */
2901 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2902 mvpp2_prs_hw_write(priv, &pe);
2907 /* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
                  struct mvpp2 *priv)
{
    int index, i, err;
2913 /* Enable tcam table */
2914 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2916 /* Clear all tcam and sram entries */
2917 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2918 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2919 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2920 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2922 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2923 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2924 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2927 /* Invalidate all tcam entries */
2928 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2929 mvpp2_prs_hw_inv(priv, index);
2931 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
                    sizeof(struct mvpp2_prs_shadow),
                    GFP_KERNEL);
    if (!priv->prs_shadow)
        return -ENOMEM;
2937 /* Always start from lookup = 0 */
2938 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2939 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2940 MVPP2_PRS_PORT_LU_MAX, 0);
2942 mvpp2_prs_def_flow_init(priv);
2944 mvpp2_prs_mh_init(priv);
2946 mvpp2_prs_mac_init(priv);
2948 mvpp2_prs_dsa_init(priv);
    err = mvpp2_prs_etype_init(priv);
    if (err)
        return err;

    err = mvpp2_prs_vlan_init(pdev, priv);
    if (err)
        return err;

    err = mvpp2_prs_pppoe_init(priv);
    if (err)
        return err;

    err = mvpp2_prs_ip6_init(priv);
    if (err)
        return err;

    err = mvpp2_prs_ip4_init(priv);
    if (err)
        return err;

    return 0;
}
2973 /* Compare MAC DA with tcam entry data */
2974 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2975 const u8 *da, unsigned char *mask)
2977 unsigned char tcam_byte, tcam_mask;
2980 for (index = 0; index < ETH_ALEN; index++) {
2981 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
        if (tcam_mask != mask[index])
            return false;

        if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
            return false;
    }

    return true;
}
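/* Illustration only (hypothetical wrapper, not used by this driver): an
 * exact match is just a range comparison with an all-ones mask, so
 * mvpp2_prs_mac_range_equals() degenerates to a byte-wise compare of the
 * TCAM data against the DA.
 */
static bool mvpp2_prs_mac_exact_equals(struct mvpp2_prs_entry *pe,
                       const u8 *da)
{
    unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

    return mvpp2_prs_mac_range_equals(pe, da, mask);
}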
2992 /* Find tcam entry with matched pair <MAC DA, port> */
2993 static struct mvpp2_prs_entry *
2994 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2995 unsigned char *mask, int udf_type)
2997 struct mvpp2_prs_entry *pe;
    pe = kzalloc(sizeof(*pe), GFP_KERNEL);
    if (!pe)
        return NULL;
3003 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
    /* Go through all entries with MVPP2_PRS_LU_MAC */
3006 for (tid = MVPP2_PE_FIRST_FREE_TID;
3007 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3008 unsigned int entry_pmap;
3010 if (!priv->prs_shadow[tid].valid ||
3011 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3012 (priv->prs_shadow[tid].udf != udf_type))
        pe->index = tid;
        mvpp2_prs_hw_read(priv, pe);
3017 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3019 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3028 /* Update parser's mac da entry */
3029 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3030 const u8 *da, bool add)
3032 struct mvpp2_prs_entry *pe;
3033 unsigned int pmap, len, ri;
3034 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    /* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
3038 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3039 MVPP2_PRS_UDF_MAC_DEF);
3046 /* Create new TCAM entry */
        /* Find the first range MAC entry */
3048 for (tid = MVPP2_PE_FIRST_FREE_TID;
3049 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3050 if (priv->prs_shadow[tid].valid &&
3051 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3052 (priv->prs_shadow[tid].udf ==
3053 MVPP2_PRS_UDF_MAC_RANGE))
        /* Go through all entries from first to last */
3057 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3062 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3065 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3068 /* Mask all ports */
3069 mvpp2_prs_tcam_port_map_set(pe, 0);
3072 /* Update port mask */
3073 mvpp2_prs_tcam_port_set(pe, port, add);
3075 /* Invalidate the entry if no ports are left enabled */
3076 pmap = mvpp2_prs_tcam_port_map_get(pe);
3082 mvpp2_prs_hw_inv(priv, pe->index);
3083 priv->prs_shadow[pe->index].valid = false;
3088 /* Continue - set next lookup */
3089 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
    /* Set match on DA */
    len = ETH_ALEN;
    while (len--)
        mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3096 /* Set result info bits */
3097 if (is_broadcast_ether_addr(da))
3098 ri = MVPP2_PRS_RI_L2_BCAST;
3099 else if (is_multicast_ether_addr(da))
3100 ri = MVPP2_PRS_RI_L2_MCAST;
3102 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3104 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3105 MVPP2_PRS_RI_MAC_ME_MASK);
3106 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3107 MVPP2_PRS_RI_MAC_ME_MASK);
3109 /* Shift to ethertype */
3110 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3111 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3113 /* Update shadow table and hw entry */
3114 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3115 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3116 mvpp2_prs_hw_write(priv, pe);
3123 static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3125 struct mvpp2_port *port = netdev_priv(dev);
3128 /* Remove old parser entry */
    err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
                      false);
    if (err)
        return err;
3134 /* Add new parser entry */
    err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
    if (err)
        return err;
3139 /* Set addr in the device */
    ether_addr_copy(dev->dev_addr, da);

    return 0;
}
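/* Hypothetical caller (sketch only; it assumes the netdev ops wire a
 * set-MAC-address hook to the helper above): validate the new address,
 * then funnel it through the parser update.
 */
static int mvpp2_set_mac_address_sketch(struct net_device *dev, void *p)
{
    const struct sockaddr *addr = p;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    return mvpp2_prs_update_mac_da(dev, addr->sa_data);
}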
/* Delete all the port's simple (i.e. non-range) multicast entries */
3146 static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3148 struct mvpp2_prs_entry pe;
3151 for (tid = MVPP2_PE_FIRST_FREE_TID;
3152 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3153 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3155 if (!priv->prs_shadow[tid].valid ||
3156 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3157 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
        /* Only simple mac entries */
        pe.index = tid;
        mvpp2_prs_hw_read(priv, &pe);
3164 /* Read mac addr from entry */
3165 for (index = 0; index < ETH_ALEN; index++)
3166 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3169 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3170 /* Delete this entry */
3171 mvpp2_prs_mac_da_accept(priv, port, da, false);
3175 static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3178 case MVPP2_TAG_TYPE_EDSA:
3179 /* Add port to EDSA entries */
3180 mvpp2_prs_dsa_tag_set(priv, port, true,
3181 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3182 mvpp2_prs_dsa_tag_set(priv, port, true,
3183 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3184 /* Remove port from DSA entries */
3185 mvpp2_prs_dsa_tag_set(priv, port, false,
3186 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3187 mvpp2_prs_dsa_tag_set(priv, port, false,
3188 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3191 case MVPP2_TAG_TYPE_DSA:
3192 /* Add port to DSA entries */
3193 mvpp2_prs_dsa_tag_set(priv, port, true,
3194 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3195 mvpp2_prs_dsa_tag_set(priv, port, true,
3196 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3197 /* Remove port from EDSA entries */
3198 mvpp2_prs_dsa_tag_set(priv, port, false,
3199 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3200 mvpp2_prs_dsa_tag_set(priv, port, false,
3201 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3204 case MVPP2_TAG_TYPE_MH:
3205 case MVPP2_TAG_TYPE_NONE:
        /* Remove port from EDSA and DSA entries */
3207 mvpp2_prs_dsa_tag_set(priv, port, false,
3208 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3209 mvpp2_prs_dsa_tag_set(priv, port, false,
3210 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3211 mvpp2_prs_dsa_tag_set(priv, port, false,
3212 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3213 mvpp2_prs_dsa_tag_set(priv, port, false,
3214 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3218 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3225 /* Set prs flow for the port */
3226 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3228 struct mvpp2_prs_entry *pe;
3231 pe = mvpp2_prs_flow_find(port->priv, port->id);
    /* Such an entry does not exist yet */
    if (!pe) {
        /* Go through all entries from last to first */
3236 tid = mvpp2_prs_tcam_first_free(port->priv,
3237 MVPP2_PE_LAST_FREE_TID,
3238 MVPP2_PE_FIRST_FREE_TID);
3242 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3246 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3250 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3251 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3253 /* Update shadow table */
3254 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3257 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3258 mvpp2_prs_hw_write(port->priv, pe);
3264 /* Classifier configuration routines */
3266 /* Update classification flow table registers */
3267 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3268 struct mvpp2_cls_flow_entry *fe)
3270 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3271 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3272 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3273 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3276 /* Update classification lookup table register */
3277 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3278 struct mvpp2_cls_lookup_entry *le)
3282 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3283 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
    mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
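/* Illustration only (hypothetical helper, not part of this driver): the
 * lookup table is accessed indirectly, so reading an entry back follows
 * the same index-then-data sequence as mvpp2_cls_lookup_write() above.
 */
static void mvpp2_cls_lookup_read_sketch(struct mvpp2 *priv, int lkpid,
                     int way,
                     struct mvpp2_cls_lookup_entry *le)
{
    /* Select the entry first, then fetch its data word */
    mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG,
            (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid);
    le->lkpid = lkpid;
    le->way = way;
    le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
}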
3287 /* Classifier default initialization */
3288 static void mvpp2_cls_init(struct mvpp2 *priv)
3290 struct mvpp2_cls_lookup_entry le;
3291 struct mvpp2_cls_flow_entry fe;
3294 /* Enable classifier */
3295 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
    /* Clear classifier flow table */
    memset(&fe.data, 0, sizeof(fe.data));
    for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
        fe.index = index;
        mvpp2_cls_flow_write(priv, &fe);
    }
    /* Clear classifier lookup table */
    le.data = 0;
    for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
        le.lkpid = index;
        le.way = 0;
        mvpp2_cls_lookup_write(priv, &le);
        le.way = 1;
        mvpp2_cls_lookup_write(priv, &le);
    }
3316 static void mvpp2_cls_port_config(struct mvpp2_port *port)
3318 struct mvpp2_cls_lookup_entry le;
3321 /* Set way for the port */
3322 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3323 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3324 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
    /* Pick the entry to be accessed in lookup ID decoding table
     * according to the way and lkpid.
     */
    le.lkpid = port->id;
    le.way = 0;
    le.data = 0;
3333 /* Set initial CPU queue for receiving packets */
3334 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3335 le.data |= port->first_rxq;
3337 /* Disable classification engines */
3338 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3340 /* Update lookup ID table entry */
3341 mvpp2_cls_lookup_write(port->priv, &le);
3344 /* Set CPU queue number for oversize packets */
3345 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3349 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3350 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3352 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3353 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3355 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3356 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3357 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3360 /* Buffer Manager configuration routines */
3363 static int mvpp2_bm_pool_create(struct platform_device *pdev,
3365 struct mvpp2_bm_pool *bm_pool, int size)
3370 size_bytes = sizeof(u32) * size;
3371 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
3372 &bm_pool->phys_addr,
3374 if (!bm_pool->virt_addr)
3377 if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
3378 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
3379 bm_pool->phys_addr);
3380 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3381 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3385 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3386 bm_pool->phys_addr);
3387 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3389 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3390 val |= MVPP2_BM_START_MASK;
3391 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3393 bm_pool->type = MVPP2_BM_FREE;
3394 bm_pool->size = size;
3395 bm_pool->pkt_size = 0;
3396 bm_pool->buf_num = 0;
3397 atomic_set(&bm_pool->in_use, 0);
3402 /* Set pool buffer size */
3403 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3404 struct mvpp2_bm_pool *bm_pool,
3409 bm_pool->buf_size = buf_size;
3411 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3412 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3415 /* Free all buffers from the pool */
3416 static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
3420 for (i = 0; i < bm_pool->buf_num; i++) {
        u32 vaddr;

        /* Get buffer virtual address (indirect access) */
        mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
        vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
        if (!vaddr)
            break;
        dev_kfree_skb_any((struct sk_buff *)vaddr);
    }
3431 /* Update BM driver with number of buffers removed from pool */
    bm_pool->buf_num -= i;
}
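/* Illustration only (hypothetical helper): popping a single buffer uses
 * the same indirect two-register sequence as the loop above - reading
 * MVPP2_BM_PHY_ALLOC_REG latches an allocation, after which the buffer's
 * virtual-address cookie can be fetched.
 */
static u32 mvpp2_bm_buf_pop_sketch(struct mvpp2 *priv, int pool)
{
    mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(pool));
    return mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
}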
3436 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3438 struct mvpp2_bm_pool *bm_pool)
3442 mvpp2_bm_bufs_free(priv, bm_pool);
3443 if (bm_pool->buf_num) {
3444 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3448 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3449 val |= MVPP2_BM_STOP_MASK;
3450 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3452 dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
3454 bm_pool->phys_addr);
3458 static int mvpp2_bm_pools_init(struct platform_device *pdev,
3462 struct mvpp2_bm_pool *bm_pool;
3464 /* Create all pools with maximum size */
3465 size = MVPP2_BM_POOL_SIZE_MAX;
3466 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3467 bm_pool = &priv->bm_pools[i];
3469 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3471 goto err_unroll_pools;
3472 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3477 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3478 for (i = i - 1; i >= 0; i--)
3479 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3483 static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3487 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
        /* Mask all BM interrupts */
3489 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3490 /* Clear BM cause register */
3491 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3494 /* Allocate and initialize BM pools */
3495 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3496 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
    if (!priv->bm_pools)
        return -ENOMEM;
3500 err = mvpp2_bm_pools_init(pdev, priv);
3506 /* Attach long pool to rxq */
3507 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3508 int lrxq, int long_pool)
3513 /* Get queue physical ID */
3514 prxq = port->rxqs[lrxq]->id;
3516 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3517 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3518 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
3519 MVPP2_RXQ_POOL_LONG_MASK);
3521 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3524 /* Attach short pool to rxq */
3525 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3526 int lrxq, int short_pool)
3531 /* Get queue physical ID */
3532 prxq = port->rxqs[lrxq]->id;
3534 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3535 val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3536 val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
3537 MVPP2_RXQ_POOL_SHORT_MASK);
3539 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3542 /* Allocate skb for BM pool */
3543 static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
3544 struct mvpp2_bm_pool *bm_pool,
                       dma_addr_t *buf_phys_addr,
                       gfp_t gfp_mask)
{
3548 struct sk_buff *skb;
3549 dma_addr_t phys_addr;
    skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
    if (!skb)
        return NULL;
3555 phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
3556 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3558 if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
        dev_kfree_skb_any(skb);
        return NULL;
    }
    *buf_phys_addr = phys_addr;

    return skb;
}
3567 /* Set pool number in a BM cookie */
3568 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3572 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3573 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3578 /* Get pool number from a BM cookie */
3579 static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
    return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
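/* Illustration only: the pool number survives a set/get round-trip
 * through the BM cookie.
 */
static inline bool mvpp2_bm_cookie_roundtrip_ok(u32 cookie, int pool)
{
    u32 bm = mvpp2_bm_cookie_pool_set(cookie, pool);

    return mvpp2_bm_cookie_pool_get(bm) == pool;
}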
3584 /* Release buffer to BM */
3585 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3586 u32 buf_phys_addr, u32 buf_virt_addr)
3588 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
3589 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
3592 /* Release multicast buffer */
3593 static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
3594 u32 buf_phys_addr, u32 buf_virt_addr,
3599 val |= (mc_id & MVPP2_BM_MC_ID_MASK);
3600 mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
3602 mvpp2_bm_pool_put(port, pool,
3603 buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
3607 /* Refill BM pool */
3608 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3609 u32 phys_addr, u32 cookie)
3611 int pool = mvpp2_bm_cookie_pool_get(bm);
3613 mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
3616 /* Allocate buffers for the pool */
3617 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3618 struct mvpp2_bm_pool *bm_pool, int buf_num)
3620 struct sk_buff *skb;
3621 int i, buf_size, total_size;
3623 dma_addr_t phys_addr;
3625 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3626 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
    if (buf_num < 0 ||
        (buf_num + bm_pool->buf_num > bm_pool->size)) {
3630 netdev_err(port->dev,
3631 "cannot allocate %d buffers for pool %d\n",
3632 buf_num, bm_pool->id);
3636 bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
3637 for (i = 0; i < buf_num; i++) {
        skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
        if (!skb)
            break;
        mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
    }
3645 /* Update BM driver with number of buffers added to pool */
3646 bm_pool->buf_num += i;
3647 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
3649 netdev_dbg(port->dev,
3650 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3651 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3652 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3654 netdev_dbg(port->dev,
3655 "%s pool %d: %d of %d buffers added\n",
3656 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3657 bm_pool->id, i, buf_num);
/* Notify the driver that BM pool is being used as a specific type and return
 * the pool pointer on success
 */
3664 static struct mvpp2_bm_pool *
3665 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3668 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3671 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
3672 netdev_err(port->dev, "mixing pool types is forbidden\n");
3676 if (new_pool->type == MVPP2_BM_FREE)
3677 new_pool->type = type;
    /* Allocate buffers in case BM pool is used as long pool, but packet
     * size doesn't match MTU or BM pool hasn't been used yet
     */
3682 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
3683 (new_pool->pkt_size == 0)) {
        /* Set default buffer number or free all the buffers in case
         * the pool is not empty
         */
3689 pkts_num = new_pool->buf_num;
3691 pkts_num = type == MVPP2_BM_SWF_LONG ?
3692 MVPP2_BM_LONG_BUF_NUM :
3693 MVPP2_BM_SHORT_BUF_NUM;
3695 mvpp2_bm_bufs_free(port->priv, new_pool);
3697 new_pool->pkt_size = pkt_size;
3699 /* Allocate buffers for this pool */
3700 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
3701 if (num != pkts_num) {
3702 WARN(1, "pool %d: %d of %d allocated\n",
3703 new_pool->id, num, pkts_num);
3708 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3709 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3714 /* Initialize pools for swf */
3715 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3719 if (!port->pool_long) {
3721 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
3724 if (!port->pool_long)
3727 port->pool_long->port_map |= (1 << port->id);
3729 for (rxq = 0; rxq < rxq_number; rxq++)
3730 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
3733 if (!port->pool_short) {
3735 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
3737 MVPP2_BM_SHORT_PKT_SIZE);
3738 if (!port->pool_short)
3741 port->pool_short->port_map |= (1 << port->id);
3743 for (rxq = 0; rxq < rxq_number; rxq++)
3744 mvpp2_rxq_short_pool_set(port, rxq,
3745 port->pool_short->id);
3751 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
3753 struct mvpp2_port *port = netdev_priv(dev);
3754 struct mvpp2_bm_pool *port_pool = port->pool_long;
3755 int num, pkts_num = port_pool->buf_num;
3756 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3758 /* Update BM pool with new buffer size */
3759 mvpp2_bm_bufs_free(port->priv, port_pool);
3760 if (port_pool->buf_num) {
3761 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
3765 port_pool->pkt_size = pkt_size;
3766 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
3767 if (num != pkts_num) {
3768 WARN(1, "pool %d: %d of %d allocated\n",
3769 port_pool->id, num, pkts_num);
3773 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
3774 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
    netdev_update_features(dev);

    return 0;
}
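/* Hypothetical caller (sketch only; it assumes an ndo_change_mtu hook
 * that delegates to the helper above): the long pool is resized before
 * the new MTU takes effect.
 */
static int mvpp2_change_mtu_sketch(struct net_device *dev, int mtu)
{
    /* Resize the long pool's buffers for the new packet size */
    return mvpp2_bm_update_mtu(dev, mtu);
}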
3780 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
3782 int cpu, cpu_mask = 0;
3784 for_each_present_cpu(cpu)
3785 cpu_mask |= 1 << cpu;
3786 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3787 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3790 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
3792 int cpu, cpu_mask = 0;
3794 for_each_present_cpu(cpu)
3795 cpu_mask |= 1 << cpu;
3796 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3797 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
3800 /* Mask the current CPU's Rx/Tx interrupts */
3801 static void mvpp2_interrupts_mask(void *arg)
3803 struct mvpp2_port *port = arg;
3805 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3808 /* Unmask the current CPU's Rx/Tx interrupts */
3809 static void mvpp2_interrupts_unmask(void *arg)
3811 struct mvpp2_port *port = arg;
3813 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3814 (MVPP2_CAUSE_MISC_SUM_MASK |
            MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
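/* Illustration (assumed usage; the void *arg signatures above are shaped
 * for cross-CPU callbacks): masking and unmasking are expected to be
 * driven on every CPU, e.g.:
 *
 *	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 *	...
 *	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
 */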
3818 /* Port configuration routines */
3820 static void mvpp2_port_mii_set(struct mvpp2_port *port)
3824 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3826 switch (port->phy_interface) {
3827 case PHY_INTERFACE_MODE_SGMII:
3828 val |= MVPP2_GMAC_INBAND_AN_MASK;
3830 case PHY_INTERFACE_MODE_RGMII:
3831 val |= MVPP2_GMAC_PORT_RGMII_MASK;
3833 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3836 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3839 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
3843 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3844 val |= MVPP2_GMAC_FC_ADV_EN;
3845 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3848 static void mvpp2_port_enable(struct mvpp2_port *port)
3852 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3853 val |= MVPP2_GMAC_PORT_EN_MASK;
3854 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3855 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3858 static void mvpp2_port_disable(struct mvpp2_port *port)
3862 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3863 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
3864 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3867 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
3868 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
3872 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
3873 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3874 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3877 /* Configure loopback port */
3878 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
3882 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3884 if (port->speed == 1000)
3885 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
3887 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
3889 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
3890 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
3892 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
3894 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3897 static void mvpp2_port_reset(struct mvpp2_port *port)
3901 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3902 ~MVPP2_GMAC_PORT_RESET_MASK;
3903 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3905 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3906 MVPP2_GMAC_PORT_RESET_MASK)
3910 /* Change maximum receive size of the port */
3911 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
3915 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3916 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3917 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
3918 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
    writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
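/* Worked example (illustration only, with an assumed pkt_size of 1520
 * bytes that includes the Marvell header): the field is programmed with
 * (1520 - MVPP2_MH_SIZE) / 2, i.e. the hardware keeps the limit in
 * two-byte units.
 */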
3922 /* Set defaults to the MVPP2 port */
3923 static void mvpp2_defaults_set(struct mvpp2_port *port)
3925 int tx_port_num, val, queue, ptxq, lrxq;
3927 /* Configure port to loopback if needed */
3928 if (port->flags & MVPP2_F_LOOPBACK)
3929 mvpp2_port_loopback_set(port);
3931 /* Update TX FIFO MIN Threshold */
3932 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3933 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3934 /* Min. TX threshold must be less than minimal packet length */
3935 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3936 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3938 /* Disable Legacy WRR, Disable EJP, Release from reset */
3939 tx_port_num = mvpp2_egress_port(port);
3940 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3942 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3944 /* Close bandwidth for all queues */
3945 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3946 ptxq = mvpp2_txq_phys(port->id, queue);
3947 mvpp2_write(port->priv,
3948 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
    /* Set refill period to 1 usec, refill tokens
     * and bucket size to maximum
     */
3954 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
3955 port->priv->tclk / USEC_PER_SEC);
3956 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3957 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3958 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3959 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3960 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3961 val = MVPP2_TXP_TOKEN_SIZE_MAX;
3962 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3964 /* Set MaximumLowLatencyPacketSize value to 256 */
3965 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3966 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3967 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3969 /* Enable Rx cache snoop */
3970 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3971 queue = port->rxqs[lrxq]->id;
3972 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3973 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3974 MVPP2_SNOOP_BUF_HDR_MASK;
3975 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3978 /* At default, mask all interrupts to all present cpus */
3979 mvpp2_interrupts_disable(port);
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

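/* Worked example (illustrative numbers only): publishing a freshly
 * initialized ring of 128 descriptors uses used_count = 0 and
 * free_count = 128, so the value written is
 * 0 | (128 << MVPP2_RXQ_NUM_NEW_OFFSET); the hardware decrements its
 * occupied counter by 0 and increments the non-occupied counter by 128.
 * This is exactly how mvpp2_rxq_init() below hands the whole ring to HW.
 */
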
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
{
	int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	int cpu = smp_processor_id();

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}

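/* Sketch of the reverse operation (assuming the field layout above): the
 * Rx path later recovers the pool with mvpp2_bm_cookie_pool_get(), which
 * conceptually does:
 *
 *	pool = (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
 *
 * The CPU byte is packed the same way at MVPP2_BM_COOKIE_CPU_OFFS.
 */
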
/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}

/* Reserved Tx descriptors allocation request */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	   (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;

	return 0;
}

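/* Illustrative numbers (assumptions, not taken from this file): if the
 * chunk size were 64 and 4 CPUs were present, a 1024-entry txq would
 * refuse any reservation pushing the global used + reserved total past
 * 1024 - 4 * 64 = 768, so that every CPU can always still grab one
 * chunk of its own.
 */
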
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == swab16(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}

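/* Illustrative composition (example values, not from the source): for a
 * TCP-over-IPv4 skb with a plain 14-byte Ethernet header and a 20-byte IP
 * header, mvpp2_skb_tx_csum() below ends up calling this with
 * l3_offs = 14, l3_proto = swab16(ETH_P_IP), ip_hdr_len = 5 (iphdr->ihl,
 * in 32-bit words) and l4_proto = IPPROTO_TCP; both IP_CSUM_DISABLE and
 * L4_CSUM_FRAG are then cleared, so the hardware generates both checksums.
 */
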
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 pkts)
{
	u32 val;

	val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);

	rxq->pkts_coal = pkts;
}

/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 usec)
{
	u32 val;

	val = (port->priv->tclk / USEC_PER_SEC) * usec;
	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);

	rxq->time_coal = usec;
}

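/* Worked conversion (hypothetical clock value): the threshold register
 * counts tclk cycles, so with an assumed tclk of 250 MHz a request of
 * usec = 100 would be written as (250000000 / 1000000) * 100 = 25000
 * cycles. The actual tclk comes from the clock framework at probe time.
 */
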
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		dma_addr_t buf_phys_addr =
			    txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
		struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];

		mvpp2_txq_inc_get(txq_pcpu);

		if (!skb)
			continue;

		dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
				 skb_headlen(skb), DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = this_cpu_ptr(txq->pcpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
				desc_num * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_phys, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address */
	/* indirect access */
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
		    aggr_txq->descs_phys);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(rx_desc);

		mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
				  rx_desc->buf_cookie);
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
				txq->size * MVPP2_DESC_ALIGNED_SIZE,
				&txq->descs_phys, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
					     MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
					   sizeof(*txq_pcpu->tx_skb),
					   GFP_KERNEL);
		if (!txq_pcpu->tx_skb)
			goto error;

		txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
					     sizeof(dma_addr_t), GFP_KERNEL);
		if (!txq_pcpu->tx_buffs)
			goto error;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}

	return 0;

error:
	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->tx_skb);
		kfree(txq_pcpu->tx_buffs);
	}

	dma_free_coherent(port->dev->dev.parent,
			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
			  txq->descs, txq->descs_phys);

	return -ENOMEM;
}

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->tx_skb);
		kfree(txq_pcpu->tx_buffs);
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}

/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;

	mvpp2_interrupts_disable(port);

	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

/* Adjust link */
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed  = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
		phy_print_status(phydev);
	}
}

static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << txq_number) - 1;
	tx_todo = mvpp2_tx_done(port, cause);

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}

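/* Note on the tx-done path above: the hrtimer fires in hard-irq context,
 * where freeing skbs is undesirable, so the callback only schedules the
 * tasklet; mvpp2_tx_proc_cb() then runs in softirq context, reaps the
 * completed descriptors via mvpp2_tx_done(), and re-arms the timer with
 * mvpp2_timer_set() while packets remain in flight.
 */
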
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, int is_recycle)
{
	struct sk_buff *skb;
	dma_addr_t phys_addr;

	if (is_recycle &&
	    (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
		return 0;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
	atomic_dec(&bm_pool->in_use);
	return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}

static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
			      struct mvpp2_rx_desc *rx_desc)
{
	struct mvpp2_buff_hdr *buff_hdr;
	struct sk_buff *skb;
	u32 rx_status = rx_desc->status;
	u32 buff_phys_addr;
	u32 buff_virt_addr;
	u32 buff_phys_addr_next;
	u32 buff_virt_addr_next;
	int mc_id;
	int pool_id;

	pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	buff_phys_addr = rx_desc->buf_phys_addr;
	buff_virt_addr = rx_desc->buf_cookie;

	do {
		skb = (struct sk_buff *)buff_virt_addr;
		buff_hdr = (struct mvpp2_buff_hdr *)skb->head;

		mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);

		buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
		buff_virt_addr_next = buff_hdr->next_buff_virt_addr;

		/* Release buffer */
		mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
				     buff_virt_addr, mc_id);

		buff_phys_addr = buff_phys_addr_next;
		buff_virt_addr = buff_virt_addr_next;

	} while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
}

/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
		    struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received, rx_filled, i;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	rx_filled = 0;
	for (i = 0; i < rx_todo; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		u32 bm, rx_status;
		int pool, rx_bytes, err;

		rx_filled++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;

		bm = mvpp2_bm_cookie_build(rx_desc);
		pool = mvpp2_bm_cookie_pool_get(bm);
		bm_pool = &port->priv->bm_pools[pool];
		/* Check if buffer header is used */
		if (rx_status & MVPP2_RXD_BUF_HDR) {
			mvpp2_buff_hdr_rx(port, rx_desc);
			continue;
		}

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
					  rx_desc->buf_cookie);
			continue;
		}

		skb = (struct sk_buff *)rx_desc->buf_cookie;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;
		atomic_inc(&bm_pool->in_use);

		skb_reserve(skb, MVPP2_MH_SIZE);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(&port->napi, skb);

		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			rx_filled--;
		}
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);

	return rx_todo;
}

static void
tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	dma_unmap_single(dev, desc->buf_phys_addr,
			 desc->data_size, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_phys_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		tx_desc->phys_txq = txq->id;
		tx_desc->data_size = frag->size;

		buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
					       tx_desc->data_size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
			mvpp2_txq_desc_put(txq);
			goto error;
		}

		tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
		tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVPP2_TXD_L_DESC;
			mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_phys_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	tx_desc->phys_txq = txq->id;
	tx_desc->data_size = skb_headlen(skb);

	buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
				       tx_desc->data_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}
	tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
	tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
			frags = 0;
			goto out;
		}
	}

	txq_pcpu->reserved_num -= frags;
	txq_pcpu->count += frags;
	aggr_txq->count += frags;

	/* Enable transmit */
	wmb();
	mvpp2_aggr_txq_pend_desc_add(port, frags);

	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netif_tx_stop_queue(nq);
	}
out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}

static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_read(port->priv,
				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;

	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	/* Process RX packets */
	cause_rx |= port->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete(napi);

		mvpp2_interrupts_enable(port);
	}
	port->pending_cause_rx = cause_rx;
	return rx_done;
}

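/* Illustrative decode (example value, not from the source): a cause_rx_tx
 * reading of 0x00010005 would mean Rx queues 0 and 2 have packets pending
 * (bits 0 and 2) and Tx queue 0 completed transmission (bit 16); the Tx
 * bits are masked off first because tx-done is reaped by the
 * hrtimer/tasklet pair above rather than in the NAPI poll loop.
 */
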
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	mvpp2_gmac_max_rx_size_set(port);
	mvpp2_txp_max_tx_size_set(port);

	napi_enable(&port->napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	mvpp2_port_enable(port);
	phy_start(port->phy_dev);
	netif_tx_start_all_queues(port->dev);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	napi_disable(&port->napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	phy_stop(port->phy_dev);
}

/* Return positive if MTU is valid */
static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}

static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}

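/* Example of the rounding (illustrative request values): rx_pending = 100
 * is not 16-aligned and becomes ALIGN(100, 16) = 112, while
 * tx_pending = 100 becomes ALIGN(100, 32) = 128; both adjustments are
 * written back into the ring parameters and reported via netdev_info().
 */
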
static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}

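/* Byte layout implied by the shifts above (example values only): the high
 * register holds bytes 0-3, the middle register byte 4, and byte 5 sits
 * in GMAC_CTRL_1 at MVPP2_GMAC_SA_LOW_OFFS. A MAC of 00:50:43:12:34:56
 * would read back as h = 0x00504312, m & 0xFF = 0x34 and
 * (l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF = 0x56.
 */
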
static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->phy_dev = phy_dev;
	port->link    = 0;
	port->duplex  = 0;
	port->speed   = 0;

	return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	phy_disconnect(port->phy_dev);
	port->phy_dev = NULL;
}

static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
	if (err) {
		netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
		goto err_cleanup_txqs;
	}

	/* In default link is down */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);

	mvpp2_start_dev(port);

	return 0;

err_free_irq:
	free_irq(port->irq, port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}

static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);

	free_irq(port->irq, port);
	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_cancel(&port_pcpu->tx_done_timer);
		port_pcpu->timer_scheduled = false;
		tasklet_kill(&port_pcpu->tx_done_tasklet);
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}

static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all port->id's mcast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	if (allmulti && !netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev)
			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
	}
}

static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

error:
	netdev_err(dev, "failed to change MAC address\n");
	return err;
}

static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	mtu = mvpp2_check_mtu_valid(dev, mtu);
	if (mtu < 0) {
		err = mtu;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

error:
	netdev_err(dev, "failed to change MTU\n");
	return err;
}

static struct rtnl_link_stats64 *
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors  = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}

static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	if (!port->phy_dev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}

/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */
static int mvpp2_ethtool_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phy_dev)
		return -ENODEV;
	return phy_ethtool_gset(port->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtools */
static int mvpp2_ethtool_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phy_dev)
		return -ENODEV;
	return phy_ethtool_sset(port->phy_dev, cmd);
}

/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
		mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
	}

	return 0;
}

/* Get coalescing for ethtools */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	return 0;
}

static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}

static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}

/* Device ops */

static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.get_link	= ethtool_op_get_link,
	.get_settings	= mvpp2_ethtool_get_settings,
	.set_settings	= mvpp2_ethtool_set_settings,
	.set_coalesce	= mvpp2_ethtool_set_coalesce,
	.get_coalesce	= mvpp2_ethtool_get_coalesce,
	.get_drvinfo	= mvpp2_ethtool_get_drvinfo,
	.get_ringparam	= mvpp2_ethtool_get_ringparam,
	.set_ringparam	= mvpp2_ethtool_set_ringparam,
};

/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}

/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Configure Rx queue group interrupt for this port */
	mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < txq_number; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}

6098 /* Ports initialization */
6099 static int mvpp2_port_probe(struct platform_device *pdev,
6100 struct device_node *port_node,
6102 int *next_first_rxq)
6104 struct device_node *phy_node;
6105 struct mvpp2_port *port;
6106 struct mvpp2_port_pcpu *port_pcpu;
6107 struct net_device *dev;
6108 struct resource *res;
6109 const char *dt_mac_addr;
6110 const char *mac_from;
6111 char hw_mac_addr[ETH_ALEN];
6115 int priv_common_regs_num = 2;
6118 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6123 phy_node = of_parse_phandle(port_node, "phy", 0);
6125 dev_err(&pdev->dev, "missing phy\n");
6127 goto err_free_netdev;
6130 phy_mode = of_get_phy_mode(port_node);
6132 dev_err(&pdev->dev, "incorrect phy mode\n");
6134 goto err_free_netdev;
6137 if (of_property_read_u32(port_node, "port-id", &id)) {
6139 dev_err(&pdev->dev, "missing port-id value\n");
6140 goto err_free_netdev;
6143 dev->tx_queue_len = MVPP2_MAX_TXD;
6144 dev->watchdog_timeo = 5 * HZ;
6145 dev->netdev_ops = &mvpp2_netdev_ops;
6146 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6148 port = netdev_priv(dev);
6150 port->irq = irq_of_parse_and_map(port_node, 0);
6151 if (port->irq <= 0) {
6153 goto err_free_netdev;
6156 if (of_property_read_bool(port_node, "marvell,loopback"))
6157 port->flags |= MVPP2_F_LOOPBACK;
6161 port->first_rxq = *next_first_rxq;
6162 port->phy_node = phy_node;
6163 port->phy_interface = phy_mode;
6165 res = platform_get_resource(pdev, IORESOURCE_MEM,
6166 priv_common_regs_num + id);
6167 port->base = devm_ioremap_resource(&pdev->dev, res);
6168 if (IS_ERR(port->base)) {
6169 err = PTR_ERR(port->base);
6173 /* Alloc per-cpu stats */
6174 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6180 dt_mac_addr = of_get_mac_address(port_node);
6181 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6182 mac_from = "device tree";
6183 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6185 mvpp2_get_mac_address(port, hw_mac_addr);
6186 if (is_valid_ether_addr(hw_mac_addr)) {
6187 mac_from = "hardware";
6188 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6190 mac_from = "random";
6191 eth_hw_addr_random(dev);
6195 port->tx_ring_size = MVPP2_MAX_TXD;
6196 port->rx_ring_size = MVPP2_MAX_RXD;
6198 SET_NETDEV_DEV(dev, &pdev->dev);
6200 err = mvpp2_port_init(port);
6202 dev_err(&pdev->dev, "failed to init port %d\n", id);
6203 goto err_free_stats;
6205 mvpp2_port_power_up(port);
6207 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6210 goto err_free_txq_pcpu;
6213 for_each_present_cpu(cpu) {
6214 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6216 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6217 HRTIMER_MODE_REL_PINNED);
6218 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6219 port_pcpu->timer_scheduled = false;
6221 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6222 (unsigned long)dev);
6225 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6226 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6227 dev->features = features | NETIF_F_RXCSUM;
6228 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6229 dev->vlan_features |= features;
	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
	/* Increment the first Rx queue number to be used by the next port */
	*next_first_rxq += rxq_number;
	priv->port_list[id] = port;
	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	irq_dispose_mapping(port->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}
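/*
 * The error labels above unwind in reverse order of acquisition (port
 * pcpu area, per-txq pcpu areas, stats, IRQ mapping, netdev), so each
 * failure point releases only what was already set up when it hit.
 */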
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
	irq_dispose_mapping(port->irq);
	free_netdev(port->dev);
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
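/*
 * Worked example of the window encoding above, with illustrative (not
 * authoritative) values: for a DRAM chip-select at base 0x00000000,
 * size 1 GiB (0x40000000), mbus_attr 0x0e and target id 0x0:
 *
 *	MVPP2_WIN_BASE(i) = (0x00000000 & 0xffff0000)
 *			  | (0x0e << 8) | 0x0	= 0x00000e00
 *	MVPP2_WIN_SIZE(i) = (0x40000000 - 1)
 *			  & 0xffff0000		= 0x3fff0000
 *
 * i.e. the low 16 bits of the base register carry the target
 * attribute and id, and the size is stored as (size - 1) truncated to
 * 64 KiB granularity.
 */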
/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
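/*
 * Every port gets the same fixed data/attribute FIFO allocation; the
 * final write to MVPP2_RX_FIFO_INIT_REG is assumed to latch the
 * per-port sizes programmed above and kick FIFO initialization in
 * hardware, which is why it comes last.
 */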
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints */
	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}
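	/*
	 * Example: with the constraint above, rxq_number = 4 or 8 is
	 * accepted (a multiple of 4 not exceeding MVPP2_MAX_RXQ), while
	 * rxq_number = 6 would be rejected; the multiple-of-4 rule
	 * presumably reflects a hardware queue-grouping constraint.
	 */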
	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	/* Disable HW PHY polling */
	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);

	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	int port_count, first_rxq;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->lms_base))
		return PTR_ERR(priv->lms_base);

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_gop_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_gop_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(struct mvpp2_port *),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_gop_clk;
	}

	/* Initialize ports */
	first_rxq = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
		if (err < 0)
			goto err_gop_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
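/*
 * Ordering note for the probe path above: both clocks are enabled
 * before mvpp2_init() runs, since that function already touches
 * controller registers (including those behind lms_base); the error
 * labels then disable the clocks in reverse order of enablement.
 */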
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_phys);
	}

	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{ .compatible = "marvell,armada-375-pp2" },
	{ },
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
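/*
 * Illustrative device tree fragment for one port child node, based on
 * the properties consumed in mvpp2_port_probe() above ("port-id",
 * "phy", "phy-mode", optional "marvell,loopback" and a MAC address);
 * node names, the phandle and the PHY mode are made up for the
 * example:
 *
 *	ethernet-controller {
 *		compatible = "marvell,armada-375-pp2";
 *		...
 *		eth0: port0 {
 *			port-id = <0>;
 *			phy = <&phy0>;
 *			phy-mode = "rgmii-id";
 *		};
 *	};
 */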
static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");