/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <linux/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-scratch.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
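
/*
 * A single NAPI context is shared by all ports handled by this driver:
 * the interrupt handler schedules it, and cvm_oct_napi_poll() drains
 * work for the whole receive group rather than for one device.
 */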
static struct napi_struct cvm_oct_napi;

/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @cpl: Interrupt number. Unused
 * @dev_id: Cookie to identify the device. Unused
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	napi_schedule(&cvm_oct_napi);

	return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS. Note these packets still get
		 * counted as frame errors.
		 */
	} else if (work->word2.snoip.err_code == 5 ||
		   work->word2.snoip.err_code == 7) {
		/*
		 * We received a packet with either an alignment error
		 * or an FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non-spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(work->ipprt);
		int index = cvmx_helper_get_interface_index_num(work->ipprt);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			uint8_t *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			/* Count the leading 0x55 preamble bytes. */
			while (i < work->len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				printk_ratelimited("Port %d received 0xd5 preamble\n",
						   work->ipprt);
				work->packet_ptr.s.addr += i + 1;
				work->len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				printk_ratelimited("Port %d received 0x?d preamble\n",
						   work->ipprt);
				work->packet_ptr.s.addr += i;
				work->len -= i + 4;
				for (i = 0; i < work->len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
						   work->ipprt);
				cvmx_helper_dump_packet(work);
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
				   work->ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}
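
/*
 * Worked example for the preamble fixup above (illustrative): a frame
 * received with a non-spec preamble looks like 55 55 ... 55 d5 <data>
 * <4-byte FCS>. With i leading 0x55 bytes, the 0xd5 case drops i + 1
 * bytes from the front and i + 5 bytes from the length (the preamble
 * plus the FCS). In the 0x?d case the preamble ends on a half byte, so
 * the length shrinks by i + 4 and each remaining byte is rebuilt from
 * its own high nibble and the next byte's low nibble to realign the
 * frame.
 */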

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance, or NULL if called from cvm_oct_poll_controller
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	const int coreid = cvmx_get_core_num();
	uint64_t old_group_mask;
	uint64_t old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}
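
	/*
	 * Note: IOBDMA responses land in the core's scratchpad, which
	 * userspace may also be using; that is why old_scratch is saved
	 * above and written back before this function returns.
	 */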

	/* Only allow work for our group (and preserve priorities) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
			       1ull << pow_receive_group);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
			       (old_group_mask & ~0xFFFFull) |
			       1 << pow_receive_group);
	}

	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}
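
	/*
	 * Prime the loop: the first work request was just issued
	 * asynchronously, so each iteration below can pick up a response
	 * from scratch and immediately queue the next request, overlapping
	 * the POW lookup with packet processing.
	 */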

	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (work == NULL) {
			/* No work left: re-arm the group's interrupt. */
			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
					       1ull << pow_receive_group);
				cvmx_write_csr(CVMX_SSO_WQ_INT,
					       1ull << pow_receive_group);
			} else {
				union cvmx_pow_wq_int wq_int;

				wq_int.u64 = 0;
				wq_int.s.iq_dis = 1 << pow_receive_group;
				wq_int.s.wq_int = 1 << pow_receive_group;
				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			}
			break;
		}

		pskb = (struct sk_buff **)
			(cvm_oct_get_buffer_ptr(work->packet_ptr) -
			 sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
							    CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
		rx_count++;

		skb_in_hw = work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}
		prefetch(cvm_oct_device[work->ipprt]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}
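		/*
		 * When cvm_oct_check_rcv_error() returns non-zero it has
		 * already freed the work entry, so the loop can continue
		 * without further cleanup.
		 */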

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr -
				cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->len), ptr, work->len);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr =
					work->packet_ptr;
				int len = work->len;

				while (segments--) {
					union cvmx_buf_ptr next_ptr =
					    *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

					/*
					 * Octeon Errata PKI-100: The segment
					 * size is wrong. Until it is fixed,
					 * calculate the segment size based on
					 * the packet pool buffer size. When
					 * it is fixed, the following line
					 * should be replaced with this one:
					 * int segment_size =
					 *     segment_ptr.s.size;
					 */
					int segment_size =
						CVMX_FPA_PACKET_POOL_SIZE -
						(segment_ptr.s.addr -
						 (((segment_ptr.s.addr >> 7) -
						   segment_ptr.s.back) << 7));
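					/*
					 * The arithmetic above recovers the
					 * buffer start: s.addr rounded down
					 * to a 128-byte line minus
					 * s.back * 128 is the beginning of
					 * the FPA buffer, so the pool size
					 * minus the offset into the buffer is
					 * the data available in this segment.
					 */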
					/*
					 * Don't copy more than what
					 * is left in the packet.
					 */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(segment_ptr.s.addr),
					       segment_size);
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}
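		/*
		 * At this point the skb either still shares the FPA packet
		 * buffer (packet_not_copied) or holds a private copy. The
		 * flag decides below whether only the work entry goes back
		 * to its pool or cvm_oct_free_work() releases everything.
		 */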

		if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[work->ipprt])) {
			struct net_device *dev = cvm_oct_device[work->ipprt];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error ||
					     !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
					atomic64_add(1,
						     (atomic64_t *)&priv->stats.rx_packets);
					atomic64_add(skb->len,
						     (atomic64_t *)&priv->stats.rx_bytes);
#else
					atomic_add(1,
						   (atomic_t *)&priv->stats.rx_packets);
					atomic_add(skb->len,
						   (atomic_t *)&priv->stats.rx_bytes);
#endif
				}
				netif_receive_skb(skb);
			} else {
				/* Drop any packet received for a device that isn't up */
				printk_ratelimited("%s: Device not up, packet dropped\n",
						   dev->name);
#ifdef CONFIG_64BIT
				atomic64_add(1,
					     (atomic64_t *)&priv->stats.rx_dropped);
#else
				atomic_add(1,
					   (atomic_t *)&priv->stats.rx_dropped);
#endif
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   work->ipprt);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		} else {
			cvm_oct_free_work(work);
		}
	}
	/* Restore the original POW group mask */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}

	cvm_oct_rx_refill_pool(0);

	if (rx_count < budget && napi != NULL) {
		/* No more work, stop polling and re-enable the interrupt. */
		napi_complete(napi);
		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	}

	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets
 * without interrupts.
 * @dev: Device to poll. Unused
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	cvm_oct_napi_poll(NULL, 16);
}
#endif

void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (!dev_for_napi)
		panic("No net_devices were allocated.");

	netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll,
		       rx_napi_weight);
	napi_enable(&cvm_oct_napi);
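
	/*
	 * The NAPI instance is attached to the first allocated net_device
	 * above only because netif_napi_add() requires one; the poll
	 * routine itself services every port in the receive group.
	 */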

	/* Register an IRQ handler to receive POW interrupts */
	i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
			cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n",
		      OCTEON_IRQ_WORKQ0 + pow_receive_group);

	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);

	/* Enable POW interrupt when our port has at least one packet */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		union cvmx_sso_wq_int_thrx int_thr;
		union cvmx_pow_wq_int_pc int_pc;

		int_thr.u64 = 0;
		int_thr.s.tc_en = 1;
		int_thr.s.tc_thr = 1;
		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group),
			       int_thr.u64);

		int_pc.u64 = 0;
		int_pc.s.pc_thr = 5;
		cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
	} else {
		union cvmx_pow_wq_int_thrx int_thr;
		union cvmx_pow_wq_int_pc int_pc;

		int_thr.u64 = 0;
		int_thr.s.tc_en = 1;
		int_thr.s.tc_thr = 1;
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
			       int_thr.u64);

		int_pc.u64 = 0;
		int_pc.s.pc_thr = 5;
		cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
	}
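
	/*
	 * In both branches tc_thr = 1 requests an interrupt as soon as one
	 * work entry is queued for our group. The pc_thr setting is assumed
	 * to pace periodic re-interrupts while work remains queued; see the
	 * POW/SSO hardware manual for the exact semantics.
	 */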

	/* Schedule NAPI now. This will indirectly enable the interrupt. */
	napi_schedule(&cvm_oct_napi);
}

void cvm_oct_rx_shutdown(void)
{
	netif_napi_del(&cvm_oct_napi);
}