/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *          Santiago Leon <santil@linux.vnet.ibm.com>
 *          Brian King <brking@linux.vnet.ibm.com>
 *          Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ethtool.h>
#include <linux/ip.h>		/* needed for ip_hdr() in the transmit path */
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/vio.h>		/* struct vio_dev, vio_register_driver() */
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"		/* adapter/pool structures and hcall wrappers */
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.04"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
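/*
 * Driver statistics are exported through ethtool.  Each table entry below
 * pairs a display name with the field's byte offset inside struct
 * ibmveth_adapter, so IBMVETH_GET_STAT() can fetch any counter generically.
 */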
struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
};
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
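/*
 * The receive queue is a ring shared with firmware.  Ownership of each
 * entry is tracked by a toggle bit: firmware writes entries with the
 * current toggle value and the driver flips rx_queue.toggle every time it
 * wraps the ring, so an entry is pending only while its toggle matches
 * rx_queue.toggle (see ibmveth_rxq_pending_buffer() above).
 */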
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}
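/*
 * The 7/8 threshold set above is a low-water mark: ibmveth_replenish_task()
 * only refills a pool once its available count drops below it.
 */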
/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}
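/*
 * ibmveth_flush_buffer() pushes a buffer out of the data cache one line at
 * a time ("dcbfl" is the POWER data-cache-block-flush instruction).  It is
 * used only when the rx_flush module parameter is set: once before a buffer
 * is handed to firmware, and again after a received frame is copied out.
 */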
/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
					  pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
					       adapter->netdev->mtu +
					       IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);
}
/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}
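/*
 * Buffer correlators encode (pool index << 32 | buffer index).  The
 * replenish path writes the correlator into the first 8 bytes of each
 * buffer before handing it to firmware, and completed rx queue entries
 * echo it back so the driver can locate the matching skb and DMA mapping.
 */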
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}
/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}
/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
			dma_unmap_single(dev,
					 adapter->rx_queue.queue_dma,
					 adapter->rx_queue.queue_len,
					 DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->netdev->mtu + IBMVETH_BUFF_OH,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
		union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);
	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
		try_again = 0;
		goto retry;
	}
	return rc;
}
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
					       GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr) {
		netdev_err(netdev, "unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	dev = &adapter->vdev->dev;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENONET;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			napi_disable(&adapter->napi);
			return -ENOMEM;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return rc;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		netdev_err(netdev, "unable to allocate bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;
}
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;

	return 0;
}
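/*
 * ibmveth is a virtual adapter with no PHY behind it, so the ethtool
 * settings above are fixed nominal values (1 Gb/s full duplex) and
 * netdev_get_link() below unconditionally reports the link as up.
 */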
static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version,
		sizeof(info->version) - 1);
}

static u32 netdev_get_link(struct net_device *dev)
{
	return 1;
}
static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	if (data) {
		adapter->rx_csum = 1;
	} else {
		/*
		 * Since the ibmveth firmware interface does not have the
		 * concept of separate tx/rx checksum offload enable, if rx
		 * checksum is disabled we also have to disable tx checksum
		 * offload. Once we disable rx checksum offload, we are no
		 * longer allowed to send tx buffers that are not properly
		 * checksummed.
		 */
		adapter->rx_csum = 0;
		dev->features &= ~NETIF_F_IP_CSUM;
		dev->features &= ~NETIF_F_IPV6_CSUM;
	}
}
static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	if (data) {
		if (adapter->fw_ipv4_csum_support)
			dev->features |= NETIF_F_IP_CSUM;
		if (adapter->fw_ipv6_csum_support)
			dev->features |= NETIF_F_IPV6_CSUM;
		adapter->rx_csum = 1;
	} else {
		dev->features &= ~NETIF_F_IP_CSUM;
		dev->features &= ~NETIF_F_IPV6_CSUM;
	}
}
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
				    void (*done) (struct net_device *, u32))
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					 set_attr, &ret_attr);
		if (ret != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret);
			ret = h_illan_attributes(adapter->vdev->unit_address,
						 set_attr, clr_attr, &ret_attr);
		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);
		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);
			ret = h_illan_attributes(adapter->vdev->unit_address,
						 set_attr6, clr_attr6,
						 &ret_attr);
		} else {
			adapter->fw_ipv6_csum_support = data;
		}

		if (ret == H_SUCCESS || ret6 == H_SUCCESS)
			done(dev, data);
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}
static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
		return 0;

	return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);
}
static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rc = 0;

	if (data && (dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		return 0;
	if (!data && !(dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		return 0;

	if (data && !adapter->rx_csum)
		rc = ibmveth_set_csum_offload(dev, data,
					      ibmveth_set_tx_csum_flags);
	else
		ibmveth_set_tx_csum_flags(dev, data);

	return rc;
}

static u32 ibmveth_get_rx_csum(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	return adapter->rx_csum;
}
static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= netdev_get_link,
	.set_tx_csum		= ibmveth_set_tx_csum,
	.get_rx_csum		= ibmveth_get_rx_csum,
	.set_rx_csum		= ibmveth_set_rx_csum,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
	.set_sg			= ethtool_op_set_sg,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
			     (skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
						 skb_headlen(skb),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long dma_addr;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
					frag->page_offset, frag->size,
					DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | frag->size;
		descs[i+1].fields.address = dma_addr;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				skb = new_skb;
				ibmveth_rxq_recycle_buffer(adapter);
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb); /* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
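/*
 * Interrupt handling follows the usual NAPI pattern: the handler below
 * masks the virtual interrupt with h_vio_signal(VIO_IRQ_DISABLE) and
 * schedules the poll routine above, which re-enables the interrupt only
 * once the receive queue has been drained.
 */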
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif
/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	/* netdev inits at probe time along with the structures we need below */
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));

	return ret;
}
static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_multicast_list	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};
static int __devinit ibmveth_probe(struct vio_dev *dev,
				   const struct vio_device_id *id)
{
	int rc, i;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	/*
	 * Some older boxes running PHYP non-natively have an OF that returns
	 * a 8-byte local-mac-address field (and the first 2 bytes have to be
	 * ignored) while newer boxes' OF return a 6-byte field. Note that
	 * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
	 * The RPA doc specifies that the first byte must be 10b, so we'll
	 * just look for it to solve this 8 vs. 6 byte field issue
	 */
	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->features |= NETIF_F_SG;

	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_csum_offload(netdev, 1, ibmveth_set_tx_csum_flags);

	rc = register_netdev(netdev);
	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}
static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}
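/*
 * Each receive buffer pool is exposed as a sysfs kobject ("pool0" ...
 * "poolN", created in ibmveth_probe()) with three writable attributes:
 * active, num (buffer count) and size (buffer size).  Changing any of
 * them while the interface is running closes and reopens the device so
 * the new pool layout takes effect.
 */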
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}
#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};
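/*
 * On resume the driver simply kicks its own interrupt handler, which
 * schedules NAPI and triggers a replenish cycle for the receive pools.
 */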
static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] __devinitdata = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.driver		= {
		.name	= ibmveth_driver_name,
		.owner	= THIS_MODULE,
		.pm	= &ibmveth_pm_ops,
	}
};
static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);