1 /*********************************************************************
2 * Author: Cavium Networks
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
7 * Copyright (c) 2003-2010 Cavium Networks
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 *********************************************************************/
27 #include <linux/module.h>
28 #include <linux/kernel.h>
29 #include <linux/netdevice.h>
30 #include <linux/init.h>
31 #include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <net/dst.h>

#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
38 #endif /* CONFIG_XFRM */
40 #include <asm/atomic.h>
42 #include <asm/octeon/octeon.h>
44 #include "ethernet-defines.h"
45 #include "octeon-ethernet.h"
46 #include "ethernet-tx.h"
47 #include "ethernet-util.h"
52 #include "cvmx-helper.h"
54 #include "cvmx-gmxx-defs.h"
/*
 * You can define GET_SKBUFF_QOS() to override how the skbuff output
 * function determines which output queue is used. The default
 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority field, define
 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif
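/*
 * The FAU register selected for a queue is decremented by the PKO hardware
 * as packets complete, so its (negative) value says how many queued skbs the
 * hardware is done with. This helper converts the raw fetch-and-add result
 * into the number of skbs that may be freed now (at most MAX_SKB_TO_FREE)
 * and backs out any overshoot that was added to the counter.
 */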
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
	int32_t undo;
	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
	if (undo > 0)
		cvmx_fau_atomic_add32(fau, -undo);
	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
	return skb_to_free;
}
void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv)
{
	int32_t skb_to_free;
	int qos, queues_per_port;

	queues_per_port = cvmx_pko_get_num_queues(priv->port);
	/* Drain any pending packets in the free list */
	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							 priv->fau + qos * 4);

		while (skb_to_free > 0) {
			dev_kfree_skb_any(skb_dequeue(&priv->tx_free_list[qos]));
			skb_to_free--;
		}
	}
}
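/**
 * cvm_oct_restart_tx - hrtimer callback armed when a TX queue is stopped
 * @timer: the tx_restart_timer embedded in struct octeon_ethernet
 *
 * Frees any skbs the hardware has finished with and wakes the queue again.
 */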
enum hrtimer_restart cvm_oct_restart_tx(struct hrtimer *timer)
{
	struct octeon_ethernet *priv = container_of(timer, struct octeon_ethernet, tx_restart_timer);
	struct net_device *dev = cvm_oct_device[priv->port];

	cvm_oct_free_tx_skbs(priv);

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);

	return HRTIMER_NORESTART;
}
/**
 * cvm_oct_xmit - transmit a packet out one of the hardware (PKO) ports
 * @skb: Packet to send
 * @dev: Device info structure
 *
 * Returns: always zero
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	uint64_t old_scratch;
	uint64_t old_scratch2;
	int qos;
	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sk_buff *to_free_list;
	int32_t skb_to_free;
	int32_t buffers_to_free;
	unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif
	/*
	 * Prefetch the private data structure. It is larger than one
	 * cache line.
	 */
	prefetch(priv);

	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;
	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Fetch and increment the number of packets to be
		 * freed.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4,
					       MAX_SKB_TO_FREE);
	}
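	/*
	 * The two IOBDMA operations above run asynchronously; their results
	 * are picked up from the scratchpad after the CVMX_SYNCIOBDMA later
	 * in this function.
	 */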
	/*
	 * The CN3XXX series of parts has an errata (GMX-401) which
	 * causes the GMX block to hang if a collision occurs towards
	 * the end of a <68 byte packet. As a workaround for this, we
	 * pad packets to be 68 bytes whenever we are in half duplex
	 * mode. We don't handle the case of having a small packet but
	 * no room to add the padding. The kernel should always give
	 * us at least a cache line.
	 */
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;
				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					memset(__skb_put(skb, add_bytes), 0,
					       add_bytes);
			}
		}
	}
	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
	hw_buffer.s.pool = 0;
	hw_buffer.s.size =
	    (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;

	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	pko_command.s.dontfree = 1;
	pko_command.s.reg0 = priv->fau + qos * 4;
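	/*
	 * With subone0 set, the PKO hardware subtracts one from the FAU
	 * register selected by reg0 when it is done with the packet. This is
	 * how the free code above learns how many skbs on tx_free_list may
	 * safely be released.
	 */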
	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and gets carried over when a
	 * buffer is reused, bad things may happen. If in doubt and
	 * you don't need the absolute best performance, disable the
	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
	 * shown a 25% increase in performance under some loads.
	 */
228 #if REUSE_SKBUFFS_WITHOUT_FREE
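	/*
	 * FPA buffers must be 128-byte aligned. Round skb->head up to the
	 * next 128-byte boundary that leaves at least 128 bytes of headroom;
	 * this is where the hardware-visible buffer would start.
	 */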
	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/*
		 * printk("TX buffer beginning can't meet FPA
		 * alignment constraints\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely((skb_end_pointer(skb) - fpa_head) <
		     CVMX_FPA_PACKET_POOL_SIZE)) {
		/* printk("TX buffer isn't large enough for the FPA\n"); */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/* printk("TX buffer sharing data with someone else\n"); */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/* printk("TX buffer has been cloned\n"); */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/* printk("TX buffer header has been cloned\n"); */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/* printk("TX buffer has a destructor\n"); */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/* printk("TX buffer has fragments\n"); */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->truesize !=
		     sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
		/* printk("TX buffer truesize has been changed\n"); */
		goto dont_put_skbuff_in_hw;
	}
	/*
	 * We can use this buffer in the FPA. We don't need the FAU
	 * update anymore.
	 */
	pko_command.s.reg0 = 0;
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
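	/*
	 * hw_buffer.s.back records how many 128-byte cache lines lie between
	 * skb->data and the start of the FPA buffer, and the skb pointer is
	 * stashed in the 8 bytes just below fpa_head so the buffer handling
	 * code elsewhere in the driver can recover this skb when the buffer
	 * comes back from the hardware.
	 */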
	/*
	 * The skbuff will be reused without ever being freed. We must
	 * clean up a bunch of core things.
	 */
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
313 dont_put_skbuff_in_hw:
315 /* Check if we can use the hardware checksumming */
316 if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
317 (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
318 ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
319 && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
320 || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) {
321 /* Use hardware checksum calc */
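		/*
		 * ipoffp1 is the offset of the IP header plus one; a non-zero
		 * value asks the PKO hardware to compute and insert the
		 * TCP/UDP checksum on the way out.
		 */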
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
	}
	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		buffers_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}

	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
	/*
	 * If we're sending faster than the receive can free them then
	 * don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree) {
		pko_command.s.dontfree = 1;
		pko_command.s.reg0 = priv->fau + qos * 4;
	}
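	/*
	 * QUEUE_CORE: the skb stays on tx_free_list and is freed by the
	 * kernel once the FAU count shows the hardware is done with it.
	 * QUEUE_HW: the buffer now belongs to the FPA pool, so the hardware
	 * owns it from here on.
	 */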
	if (pko_command.s.dontfree)
		queue_type = QUEUE_CORE;
	else
		queue_type = QUEUE_HW;
354 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
356 /* Drop this packet if we have too many already queued to the HW */
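	/*
	 * If the device has a queue length, stop the queue and let the
	 * restart hrtimer wake it once in-flight skbs have been freed;
	 * otherwise the packet is simply dropped.
	 */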
	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
		if (dev->tx_queue_len != 0) {
			/* Drop the lock when notifying the core. */
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
			netif_stop_queue(dev);
			hrtimer_start(&priv->tx_restart_timer,
				      priv->tx_restart_interval, HRTIMER_MODE_REL);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		} else {
			/* If not using normal queueing. */
			queue_type = QUEUE_DROP;
			goto skip_xmit;
		}
	}
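	/*
	 * Hand the command to the PKO. prepare/finish are used as a pair:
	 * the command words built above are written to the output queue and
	 * the doorbell is rung in the finish step. CVMX_PKO_LOCK_NONE means
	 * no PKO-level locking is performed here.
	 */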
	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_NONE);

	/* Send the packet to the output queue */
	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
						 priv->queue + qos,
						 pko_command, hw_buffer,
						 CVMX_PKO_LOCK_NONE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
		queue_type = QUEUE_DROP;
	}
skip_xmit:
	to_free_list = NULL;

	switch (queue_type) {
	case QUEUE_DROP:
		skb->next = to_free_list;
		to_free_list = skb;
		priv->stats.tx_dropped++;
		break;
	case QUEUE_HW:
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
		break;
	case QUEUE_CORE:
		__skb_queue_tail(&priv->tx_free_list[qos], skb);
		break;
	default:
		BUG();
	}
	while (skb_to_free > 0) {
		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
		t->next = to_free_list;
		to_free_list = t;
		skb_to_free--;
	}
410 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
412 /* Do the actual freeing outside of the lock. */
	while (to_free_list) {
		struct sk_buff *t = to_free_list;
		to_free_list = to_free_list->next;
		dev_kfree_skb_any(t);
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	}

	return 0;
}
/**
 * cvm_oct_xmit_pow - transmit a packet to the POW for software processing
 * @skb: Packet to send
 * @dev: Device info structure
 *
 * Returns: always zero
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
	if (unlikely(work == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a work queue entry\n",
			   dev->name);
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(packet_buffer == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a packet buffer\n",
			   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}
	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configured skip. Then we need to align the IP
	 * packet src and dest into the same 64bit word. The below
	 * calculation may add a little extra, but that doesn't
	 * matter.
	 */
	copy_location = packet_buffer + sizeof(uint64_t);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
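	/*
	 * The extra 6 bytes mean the 14-byte Ethernet header ends 20 bytes
	 * past an 8-byte aligned address, which places the IPv4 source and
	 * destination addresses (offsets 12 and 16 in the IP header, i.e.
	 * bytes 32-39 of the copy) together in a single 64-bit word.
	 */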
	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in cvm_oct_xmit.
	 */
	memcpy(copy_location, skb->data, skb->len);
	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */
	work->hw_chksum = skb->csum;
	work->len = skb->len;
	work->ipprt = priv->port;
	work->qos = priv->port & 0x7;
	work->grp = pow_send_group;
	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->tag = pow_send_group;	/* FIXME */
	/* Default to zero; the explicit zero stores below are commented out. */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
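	/*
	 * The "back" field tells the hardware how many 128-byte cache lines
	 * separate the packet data from the start of the FPA buffer, so the
	 * buffer can later be freed back to the pool correctly.
	 */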
	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;	/* FIXME */
		work->word2.s.vlan_cfi = 0;	/* FIXME */
		work->word2.s.vlan_id = 0;	/* FIXME */
		work->word2.s.dec_ipcomp = 0;	/* FIXME */
#endif
		work->word2.s.tcp_or_udp =
		    (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
		    || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP);
#if 0
		/* FIXME */
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
					  || (ip_hdr(skb)->frag_off ==
					      1 << 14));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.s.err_code = 0;
#endif

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;	/* FIXME */
		work->word2.snoip.vlan_cfi = 0;	/* FIXME */
		work->word2.snoip.vlan_id = 0;	/* FIXME */
		work->word2.snoip.software = 0;	/* Hardware would set to zero */
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
		    (skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
		    (skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}
570 /* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
			     work->grp);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return 0;
}
/**
 * cvm_oct_tx_shutdown - free all skbs that are currently queued for TX
 * @dev: Device being shutdown
 */
void cvm_oct_tx_shutdown(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue
					  (&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}