]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/chelsio/cxgb2.c
WorkStruct: make allyesconfig
[net-next-2.6.git] / drivers / net / chelsio / cxgb2.c
CommitLineData
8199d3a7
CL
1/*****************************************************************************
2 * *
3 * File: cxgb2.c *
559fb51b
SB
4 * $Revision: 1.25 $ *
5 * $Date: 2005/06/22 00:43:25 $ *
8199d3a7
CL
6 * Description: *
7 * Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#include "common.h"
8199d3a7
CL
40#include <linux/module.h>
41#include <linux/init.h>
42#include <linux/pci.h>
43#include <linux/netdevice.h>
44#include <linux/etherdevice.h>
45#include <linux/if_vlan.h>
46#include <linux/mii.h>
47#include <linux/sockios.h>
48#include <linux/proc_fs.h>
559fb51b 49#include <linux/dma-mapping.h>
8199d3a7
CL
50#include <asm/uaccess.h>
51
8199d3a7
CL
52#include "cpl5_cmd.h"
53#include "regs.h"
54#include "gmac.h"
55#include "cphy.h"
56#include "sge.h"
8199d3a7
CL
57#include "espi.h"
58
559fb51b
SB
/*
 * Compatibility shim: on 2.4 kernels (where work_struct is provided as a
 * macro by the tqueue emulation) map the 2.6 work-queue API onto task
 * queues and drive the periodic MAC statistics update from a timer; on
 * 2.6 use delayed work directly.
 */
#ifdef work_struct
#include <linux/tqueue.h>
#define INIT_WORK INIT_TQUEUE
#define schedule_work schedule_task
#define flush_scheduled_work flush_scheduled_tasks

/* Arm the stats timer to fire 'secs' seconds from now. */
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
}

/* Stop the stats timer and wait for any already-queued task to finish. */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	del_timer_sync(&ap->stats_update_timer);
	flush_scheduled_tasks();
}

/*
 * Stats update timer for 2.4. It schedules a task to do the actual update as
 * we need to access MAC statistics in process context.
 */
static void mac_stats_timer(unsigned long data)
{
	struct adapter *ap = (struct adapter *)data;

	schedule_task(&ap->stats_update_task);
}
#else
#include <linux/workqueue.h>

/* Queue the delayed stats work to run 'secs' seconds from now. */
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

/* Cancel a pending (not yet running) stats update. */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}
#endif
8199d3a7
CL
99
/* SGE ring sizing limits (entries). */
#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MIN_FL_ENTRIES 32

/* Bitmask covering every possible port bit in open_device_map. */
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

/* Default netif message categories enabled at probe time. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32
559fb51b 120MODULE_DESCRIPTION(DRV_DESCRIPTION);
8199d3a7
CL
121MODULE_AUTHOR("Chelsio Communications");
122MODULE_LICENSE("GPL");
8199d3a7
CL
123
124static int dflt_msg_enable = DFLT_MSG_ENABLE;
125
8d3b33f6 126module_param(dflt_msg_enable, int, 0);
8199d3a7
CL
127MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
128
129
130static const char pci_speed[][4] = {
131 "33", "66", "100", "133"
132};
133
134/*
135 * Setup MAC to receive the types of packets we want.
136 */
137static void t1_set_rxmode(struct net_device *dev)
138{
139 struct adapter *adapter = dev->priv;
140 struct cmac *mac = adapter->port[dev->if_port].mac;
141 struct t1_rx_mode rm;
142
143 rm.dev = dev;
144 rm.idx = 0;
145 rm.list = dev->mc_list;
146 mac->ops->set_rx_mode(mac, &rm);
147}
148
149static void link_report(struct port_info *p)
150{
151 if (!netif_carrier_ok(p->dev))
559fb51b 152 printk(KERN_INFO "%s: link down\n", p->dev->name);
8199d3a7 153 else {
559fb51b 154 const char *s = "10Mbps";
8199d3a7
CL
155
156 switch (p->link_config.speed) {
559fb51b
SB
157 case SPEED_10000: s = "10Gbps"; break;
158 case SPEED_1000: s = "1000Mbps"; break;
159 case SPEED_100: s = "100Mbps"; break;
8199d3a7
CL
160 }
161
559fb51b 162 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
8199d3a7
CL
163 p->dev->name, s,
164 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
165 }
166}
167
168void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
169 int speed, int duplex, int pause)
170{
171 struct port_info *p = &adapter->port[port_id];
172
173 if (link_stat != netif_carrier_ok(p->dev)) {
174 if (link_stat)
175 netif_carrier_on(p->dev);
176 else
177 netif_carrier_off(p->dev);
178 link_report(p);
179
180 }
181}
182
183static void link_start(struct port_info *p)
184{
185 struct cmac *mac = p->mac;
186
187 mac->ops->reset(mac);
188 if (mac->ops->macaddress_set)
189 mac->ops->macaddress_set(mac, p->dev->dev_addr);
190 t1_set_rxmode(p->dev);
191 t1_link_start(p->phy, mac, &p->link_config);
192 mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
193}
194
195static void enable_hw_csum(struct adapter *adapter)
196{
197 if (adapter->flags & TSO_CAPABLE)
559fb51b
SB
198 t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
199 t1_tp_set_tcp_checksum_offload(adapter, 1);
8199d3a7
CL
200}
201
/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	/* One-time hardware initialization, done on first open of any port. */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);
	/* The IRQ line may be shared with other devices. */
	if ((err = request_irq(adapter->pdev->irq,
			       t1_select_intr_handler(adapter), IRQF_SHARED,
			       adapter->name, adapter))) {
		goto out_err;
	}
	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
 out_err:
	return err;
}
230
/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	/* Quiesce DMA and mask interrupts before releasing the IRQ line. */
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
}
240
241static int cxgb_open(struct net_device *dev)
242{
243 int err;
244 struct adapter *adapter = dev->priv;
245 int other_ports = adapter->open_device_map & PORT_MASK;
246
247 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
248 return err;
249
250 __set_bit(dev->if_port, &adapter->open_device_map);
251 link_start(&adapter->port[dev->if_port]);
252 netif_start_queue(dev);
253 if (!other_ports && adapter->params.stats_update_period)
254 schedule_mac_stats_update(adapter,
255 adapter->params.stats_update_period);
256 return 0;
257}
258
/*
 * net_device stop: quiesce this port; the last port down also cancels
 * the stats update work and releases shared adapter resources.
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		/*
		 * Taking and dropping work_lock lets a stats task that is
		 * mid-flight finish (it reschedules under this lock) before
		 * we cancel further updates.
		 */
		spin_lock(&adapter->work_lock);	/* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	/* Last open port gone: tear down shared resources. */
	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
283
/*
 * net_device get_stats: fold the port MAC's hardware counters into the
 * standard net_device_stats structure.
 */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->RxFCSErrors;
	ns->rx_frame_errors = pstats->RxAlignErrors;
	ns->rx_fifo_errors = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = pstats->TxLateCollisions;
	return ns;
}
331
332static u32 get_msglevel(struct net_device *dev)
333{
334 struct adapter *adapter = dev->priv;
335
336 return adapter->msg_enable;
337}
338
339static void set_msglevel(struct net_device *dev, u32 val)
340{
341 struct adapter *adapter = dev->priv;
342
343 adapter->msg_enable = val;
344}
345
346static char stats_strings[][ETH_GSTRING_LEN] = {
559fb51b
SB
347 "TxOctetsOK",
348 "TxOctetsBad",
349 "TxUnicastFramesOK",
350 "TxMulticastFramesOK",
351 "TxBroadcastFramesOK",
352 "TxPauseFrames",
353 "TxFramesWithDeferredXmissions",
354 "TxLateCollisions",
355 "TxTotalCollisions",
356 "TxFramesAbortedDueToXSCollisions",
357 "TxUnderrun",
358 "TxLengthErrors",
359 "TxInternalMACXmitError",
360 "TxFramesWithExcessiveDeferral",
361 "TxFCSErrors",
362
363 "RxOctetsOK",
364 "RxOctetsBad",
365 "RxUnicastFramesOK",
366 "RxMulticastFramesOK",
367 "RxBroadcastFramesOK",
368 "RxPauseFrames",
369 "RxFCSErrors",
370 "RxAlignErrors",
371 "RxSymbolErrors",
372 "RxDataErrors",
373 "RxSequenceErrors",
374 "RxRuntErrors",
375 "RxJabberErrors",
376 "RxInternalMACRcvError",
377 "RxInRangeLengthErrors",
378 "RxOutOfRangeLengthField",
379 "RxFrameTooLongErrors",
380
381 "TSO",
382 "VLANextractions",
383 "VLANinsertions",
384 "RxCsumGood",
385 "TxCsumOffload",
386 "RxDrops"
387
388 "respQ_empty",
389 "respQ_overflow",
390 "freelistQ_empty",
391 "pkt_too_big",
392 "pkt_mismatch",
393 "cmdQ_full0",
394 "cmdQ_full1",
395 "tx_ipfrags",
396 "tx_reg_pkts",
397 "tx_lso_pkts",
398 "tx_do_cksum",
399
400 "espi_DIP2ParityErr",
401 "espi_DIP4Err",
402 "espi_RxDrops",
403 "espi_TxDrops",
404 "espi_RxOvfl",
405 "espi_ParityErr"
8199d3a7 406};
559fb51b
SB
407
/* Size in bytes of the register dump produced by get_regs(). */
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool: length of the buffer needed for a register dump. */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
8199d3a7
CL
414
/*
 * ethtool: fill in driver identification strings.
 * NOTE(review): strcpy into the fixed-size ethtool_drvinfo fields assumes
 * DRV_NAME, DRV_VERSION and the PCI name always fit -- consider a bounded
 * copy (strlcpy) here.
 */
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = dev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(adapter->pdev));
}
424
/* ethtool: number of statistics reported; must track stats_strings[]. */
static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}
429
430static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
431{
432 if (stringset == ETH_SS_STATS)
433 memcpy(data, stats_strings, sizeof(stats_strings));
434}
435
/*
 * ethtool: collect the full statistics vector.  The order of writes here
 * must match stats_strings[] exactly.
 * NOTE(review): the espi_* names at the end of stats_strings[] have no
 * corresponding values emitted here -- verify that the table and this
 * function agree on the stat count.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_port_stats *ss;
	const struct sge_intr_counts *t;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
	t = t1_sge_get_intr_counts(adapter->sge);

	/* MAC transmit counters. */
	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;

	/* MAC receive counters. */
	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;

	/* SGE per-port counters. */
	*data++ = ss->tso;
	*data++ = ss->vlan_xtract;
	*data++ = ss->vlan_insert;
	*data++ = ss->rx_cso_good;
	*data++ = ss->tx_cso;
	*data++ = ss->rx_drops;

	/* SGE interrupt counters. */
	*data++ = (u64)t->respQ_empty;
	*data++ = (u64)t->respQ_overflow;
	*data++ = (u64)t->freelistQ_empty;
	*data++ = (u64)t->pkt_too_big;
	*data++ = (u64)t->pkt_mismatch;
	*data++ = (u64)t->cmdQ_full[0];
	*data++ = (u64)t->cmdQ_full[1];
	*data++ = (u64)t->tx_ipfrags;
	*data++ = (u64)t->tx_reg_pkts;
	*data++ = (u64)t->tx_lso_pkts;
	*data++ = (u64)t->tx_do_cksum;
}
502
/*
 * Copy adapter registers in [start, end] (byte offsets, inclusive) into
 * the dump buffer at the same offsets, one 32-bit word at a time.
 */
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}
8199d3a7 511
559fb51b
SB
/* ethtool: dump the chip register block into a zeroed buffer. */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
}
525
526static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
527{
528 struct adapter *adapter = dev->priv;
529 struct port_info *p = &adapter->port[dev->if_port];
530
531 cmd->supported = p->link_config.supported;
532 cmd->advertising = p->link_config.advertising;
533
534 if (netif_carrier_ok(dev)) {
535 cmd->speed = p->link_config.speed;
536 cmd->duplex = p->link_config.duplex;
537 } else {
538 cmd->speed = -1;
539 cmd->duplex = -1;
540 }
541
559fb51b
SB
542 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
543 cmd->phy_address = p->phy->addr;
544 cmd->transceiver = XCVR_EXTERNAL;
545 cmd->autoneg = p->link_config.autoneg;
546 cmd->maxtxpkt = 0;
547 cmd->maxrxpkt = 0;
8199d3a7
CL
548 return 0;
549}
550
551static int speed_duplex_to_caps(int speed, int duplex)
552{
553 int cap = 0;
554
555 switch (speed) {
556 case SPEED_10:
557 if (duplex == DUPLEX_FULL)
558 cap = SUPPORTED_10baseT_Full;
559 else
560 cap = SUPPORTED_10baseT_Half;
561 break;
562 case SPEED_100:
563 if (duplex == DUPLEX_FULL)
564 cap = SUPPORTED_100baseT_Full;
565 else
566 cap = SUPPORTED_100baseT_Half;
567 break;
568 case SPEED_1000:
569 if (duplex == DUPLEX_FULL)
570 cap = SUPPORTED_1000baseT_Full;
571 else
572 cap = SUPPORTED_1000baseT_Half;
573 break;
574 case SPEED_10000:
575 if (duplex == DUPLEX_FULL)
576 cap = SUPPORTED_10000baseT_Full;
577 }
578 return cap;
579}
580
/* Advertisement bits this driver can map to link parameters. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)

/* ethtool: change speed/duplex/autoneg settings. */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		/*
		 * Forced mode must be a supported speed/duplex.  1Gbps is
		 * rejected as a forced speed -- presumably it requires
		 * autonegotiation on this hardware (TODO confirm).
		 */
		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* x & (x - 1): more than one bit set -> advertise all we support. */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
619
620static void get_pauseparam(struct net_device *dev,
621 struct ethtool_pauseparam *epause)
622{
623 struct adapter *adapter = dev->priv;
624 struct port_info *p = &adapter->port[dev->if_port];
625
626 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
627 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
628 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
629}
630
/*
 * ethtool: configure pause-frame (flow control) behavior.  Fails with
 * -EINVAL when pause autoneg is requested but the PHY cannot
 * autonegotiate.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		/* Renegotiate so the link partner learns the new setting. */
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		/* Forced mode: apply immediately, keeping speed/duplex. */
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}
660
661static u32 get_rx_csum(struct net_device *dev)
662{
663 struct adapter *adapter = dev->priv;
664
665 return (adapter->flags & RX_CSUM_ENABLED) != 0;
666}
667
668static int set_rx_csum(struct net_device *dev, u32 data)
669{
670 struct adapter *adapter = dev->priv;
671
672 if (data)
673 adapter->flags |= RX_CSUM_ENABLED;
674 else
675 adapter->flags &= ~RX_CSUM_ENABLED;
676 return 0;
677}
678
679static int set_tso(struct net_device *dev, u32 value)
680{
681 struct adapter *adapter = dev->priv;
682
683 if (!(adapter->flags & TSO_CAPABLE))
684 return value ? -EOPNOTSUPP : 0;
685 return ethtool_op_set_tso(dev, value);
686}
687
688static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
689{
690 struct adapter *adapter = dev->priv;
691 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
692
693 e->rx_max_pending = MAX_RX_BUFFERS;
694 e->rx_mini_max_pending = 0;
695 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
696 e->tx_max_pending = MAX_CMDQ_ENTRIES;
697
698 e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
699 e->rx_mini_pending = 0;
700 e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
701 e->tx_pending = adapter->params.sge.cmdQ_size[0];
702}
703
/*
 * ethtool: resize the SGE rings.  Only permitted before the hardware is
 * fully initialized (first open); afterwards returns -EBUSY.
 */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	/* Validate against hard limits and the minimum needed for TX. */
	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	/* Command queue 1 is capped at MAX_CMDQ1_ENTRIES. */
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
	    MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}
727
/* ethtool: set interrupt coalescing / NAPI polling parameters. */
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;

	/*
	 * If RX coalescing is requested we use NAPI, otherwise interrupts.
	 * This choice can be made only when all ports and the TOE are off.
	 */
	if (adapter->open_device_map == 0)
		adapter->params.sge.polling = c->use_adaptive_rx_coalesce;

	if (adapter->params.sge.polling) {
		/* NAPI polling: no interrupt coalescing delay. */
		adapter->params.sge.rx_coalesce_usecs = 0;
	} else {
		adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	}
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	/* Push the new parameters down to the SGE. */
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}
749
750static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
751{
752 struct adapter *adapter = dev->priv;
753
559fb51b 754 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
8199d3a7
CL
755 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
756 c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
757 return 0;
758}
759
/* ethtool: number of EEPROM bytes exposed to userspace. */
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROM_SIZE;
}
764
/* EEPROM magic: Chelsio vendor ID in the low bits, chip version above. */
#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

/*
 * ethtool: read the serial EEPROM.  The start offset is rounded down to
 * a 32-bit word boundary and whole words are read into an aligned bounce
 * buffer, then the requested window is copied out.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
781
7282d491 782static const struct ethtool_ops t1_ethtool_ops = {
8199d3a7
CL
783 .get_settings = get_settings,
784 .set_settings = set_settings,
785 .get_drvinfo = get_drvinfo,
786 .get_msglevel = get_msglevel,
787 .set_msglevel = set_msglevel,
788 .get_ringparam = get_sge_param,
789 .set_ringparam = set_sge_param,
790 .get_coalesce = get_coalesce,
791 .set_coalesce = set_coalesce,
792 .get_eeprom_len = get_eeprom_len,
793 .get_eeprom = get_eeprom,
794 .get_pauseparam = get_pauseparam,
795 .set_pauseparam = set_pauseparam,
796 .get_rx_csum = get_rx_csum,
797 .set_rx_csum = set_rx_csum,
798 .get_tx_csum = ethtool_op_get_tx_csum,
799 .set_tx_csum = ethtool_op_set_tx_csum,
800 .get_sg = ethtool_op_get_sg,
801 .set_sg = ethtool_op_set_sg,
802 .get_link = ethtool_op_get_link,
803 .get_strings = get_strings,
804 .get_stats_count = get_stats_count,
805 .get_ethtool_stats = get_stats,
559fb51b
SB
806 .get_regs_len = get_regs_len,
807 .get_regs = get_regs,
8199d3a7
CL
808 .get_tso = ethtool_op_get_tso,
809 .set_tso = set_tso,
810};
811
559fb51b
SB
812static void cxgb_proc_cleanup(struct adapter *adapter,
813 struct proc_dir_entry *dir)
8199d3a7 814{
559fb51b
SB
815 const char *name;
816 name = adapter->name;
817 remove_proc_entry(name, dir);
8199d3a7 818}
559fb51b
SB
//#define chtoe_setup_toedev(adapter) NULL
/* TOE-related hooks, stubbed out as no-ops in this driver. */
#define update_mtu_tab(adapter)
#define write_smt_entry(adapter, idx)
8199d3a7
CL
822
/* Device ioctl handler; only the MII (MDIO) ioctls are supported. */
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct adapter *adapter = dev->priv;
	struct mii_ioctl_data *data = if_mii(req);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->port[dev->if_port].phy->addr;
		/* FALLTHRU */
	case SIOCGMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;
		u32 val;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
			       &val);
		data->val_out = val;
		break;
	}
	case SIOCSMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;

		/* Register writes require admin privileges. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
				data->val_in);
		break;
	}

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
860
861static int t1_change_mtu(struct net_device *dev, int new_mtu)
862{
863 int ret;
864 struct adapter *adapter = dev->priv;
865 struct cmac *mac = adapter->port[dev->if_port].mac;
866
867 if (!mac->ops->set_mtu)
559fb51b 868 return -EOPNOTSUPP;
8199d3a7 869 if (new_mtu < 68)
559fb51b 870 return -EINVAL;
8199d3a7
CL
871 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
872 return ret;
873 dev->mtu = new_mtu;
874 return 0;
875}
876
877static int t1_set_mac_addr(struct net_device *dev, void *p)
878{
879 struct adapter *adapter = dev->priv;
880 struct cmac *mac = adapter->port[dev->if_port].mac;
881 struct sockaddr *addr = p;
882
883 if (!mac->ops->macaddress_set)
884 return -EOPNOTSUPP;
885
886 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
887 mac->ops->macaddress_set(mac, dev->dev_addr);
888 return 0;
889}
890
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/*
 * Attach/detach the VLAN group and toggle hardware VLAN acceleration
 * to match.  async_lock serializes against the interrupt path.
 */
static void vlan_rx_register(struct net_device *dev,
				   struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}

/* Drop a VLAN id from the group when its VLAN device goes away. */
static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	if (adapter->vlan_grp)
		adapter->vlan_grp->vlan_devices[vid] = NULL;
	spin_unlock_irq(&adapter->async_lock);
}
#endif
913
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry point: invoke the interrupt handler directly with local
 * interrupts disabled (used by netconsole and similar facilities).
 */
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->priv;

	local_irq_save(flags);
	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif
925
/*
 * Periodic accumulation of MAC statistics. This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	/* Fast-path counter refresh on every running port. */
	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	/* work_lock synchronizes with cxgb_close() cancelling updates. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}
951
/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	/* Clear the latched cause bit before unmasking. */
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}
970
/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}
986
/*
 * Fatal hardware error: freeze DMA and interrupts (if the hardware was
 * ever brought up) and log an alert.  The adapter remains suspended.
 */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	CH_ALERT("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
996
8199d3a7
CL
997static int __devinit init_one(struct pci_dev *pdev,
998 const struct pci_device_id *ent)
999{
1000 static int version_printed;
1001
1002 int i, err, pci_using_dac = 0;
1003 unsigned long mmio_start, mmio_len;
1004 const struct board_info *bi;
1005 struct adapter *adapter = NULL;
1006 struct port_info *pi;
1007
1008 if (!version_printed) {
559fb51b
SB
1009 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
1010 DRV_VERSION);
8199d3a7
CL
1011 ++version_printed;
1012 }
1013
1014 err = pci_enable_device(pdev);
1015 if (err)
559fb51b 1016 return err;
8199d3a7
CL
1017
1018 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1019 CH_ERR("%s: cannot find PCI device memory base address\n",
1020 pci_name(pdev));
1021 err = -ENODEV;
1022 goto out_disable_pdev;
1023 }
1024
559fb51b 1025 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
8199d3a7 1026 pci_using_dac = 1;
559fb51b
SB
1027
1028 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
8199d3a7
CL
1029 CH_ERR("%s: unable to obtain 64-bit DMA for"
1030 "consistent allocations\n", pci_name(pdev));
1031 err = -ENODEV;
1032 goto out_disable_pdev;
1033 }
559fb51b
SB
1034
1035 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
8199d3a7
CL
1036 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1037 goto out_disable_pdev;
1038 }
1039
559fb51b 1040 err = pci_request_regions(pdev, DRV_NAME);
8199d3a7
CL
1041 if (err) {
1042 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1043 goto out_disable_pdev;
1044 }
1045
1046 pci_set_master(pdev);
1047
559fb51b 1048 mmio_start = pci_resource_start(pdev, 0);
8199d3a7
CL
1049 mmio_len = pci_resource_len(pdev, 0);
1050 bi = t1_get_board_info(ent->driver_data);
1051
1052 for (i = 0; i < bi->port_number; ++i) {
1053 struct net_device *netdev;
1054
1055 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1056 if (!netdev) {
1057 err = -ENOMEM;
1058 goto out_free_dev;
1059 }
1060
1061 SET_MODULE_OWNER(netdev);
1062 SET_NETDEV_DEV(netdev, &pdev->dev);
1063
1064 if (!adapter) {
1065 adapter = netdev->priv;
1066 adapter->pdev = pdev;
1067 adapter->port[0].dev = netdev; /* so we don't leak it */
1068
1069 adapter->regs = ioremap(mmio_start, mmio_len);
1070 if (!adapter->regs) {
1071 CH_ERR("%s: cannot map device registers\n",
1072 pci_name(pdev));
1073 err = -ENOMEM;
1074 goto out_free_dev;
1075 }
1076
1077 if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1078 err = -ENODEV; /* Can't handle this chip rev */
1079 goto out_free_dev;
1080 }
1081
1082 adapter->name = pci_name(pdev);
1083 adapter->msg_enable = dflt_msg_enable;
1084 adapter->mmio_len = mmio_len;
1085
1086 init_MUTEX(&adapter->mib_mutex);
1087 spin_lock_init(&adapter->tpi_lock);
1088 spin_lock_init(&adapter->work_lock);
1089 spin_lock_init(&adapter->async_lock);
1090
1091 INIT_WORK(&adapter->ext_intr_handler_task,
c4028958
DH
1092 ext_intr_task);
1093 INIT_DELAYED_WORK(&adapter->stats_update_task,
1094 mac_stats_task);
559fb51b
SB
1095#ifdef work_struct
1096 init_timer(&adapter->stats_update_timer);
1097 adapter->stats_update_timer.function = mac_stats_timer;
1098 adapter->stats_update_timer.data =
1099 (unsigned long)adapter;
1100#endif
8199d3a7
CL
1101
1102 pci_set_drvdata(pdev, netdev);
8199d3a7
CL
1103 }
1104
1105 pi = &adapter->port[i];
1106 pi->dev = netdev;
1107 netif_carrier_off(netdev);
1108 netdev->irq = pdev->irq;
1109 netdev->if_port = i;
1110 netdev->mem_start = mmio_start;
1111 netdev->mem_end = mmio_start + mmio_len - 1;
1112 netdev->priv = adapter;
1113 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
559fb51b
SB
1114 netdev->features |= NETIF_F_LLTX;
1115
8199d3a7
CL
1116 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1117 if (pci_using_dac)
1118 netdev->features |= NETIF_F_HIGHDMA;
1119 if (vlan_tso_capable(adapter)) {
8199d3a7
CL
1120#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1121 adapter->flags |= VLAN_ACCEL_CAPABLE;
1122 netdev->features |=
1123 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1124 netdev->vlan_rx_register = vlan_rx_register;
1125 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
1126#endif
1127 adapter->flags |= TSO_CAPABLE;
1128 netdev->features |= NETIF_F_TSO;
1129 }
1130
1131 netdev->open = cxgb_open;
1132 netdev->stop = cxgb_close;
1133 netdev->hard_start_xmit = t1_start_xmit;
1134 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1135 sizeof(struct cpl_tx_pkt_lso) :
1136 sizeof(struct cpl_tx_pkt);
1137 netdev->get_stats = t1_get_stats;
1138 netdev->set_multicast_list = t1_set_rxmode;
1139 netdev->do_ioctl = t1_ioctl;
1140 netdev->change_mtu = t1_change_mtu;
1141 netdev->set_mac_address = t1_set_mac_addr;
1142#ifdef CONFIG_NET_POLL_CONTROLLER
1143 netdev->poll_controller = t1_netpoll;
1144#endif
1145 netdev->weight = 64;
1146
559fb51b 1147 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
8199d3a7
CL
1148 }
1149
1150 if (t1_init_sw_modules(adapter, bi) < 0) {
1151 err = -ENODEV;
1152 goto out_free_dev;
1153 }
1154
1155 /*
1156 * The card is now ready to go. If any errors occur during device
1157 * registration we do not fail the whole card but rather proceed only
1158 * with the ports we manage to register successfully. However we must
1159 * register at least one net device.
1160 */
1161 for (i = 0; i < bi->port_number; ++i) {
1162 err = register_netdev(adapter->port[i].dev);
1163 if (err)
1164 CH_WARN("%s: cannot register net device %s, skipping\n",
1165 pci_name(pdev), adapter->port[i].dev->name);
1166 else {
1167 /*
1168 * Change the name we use for messages to the name of
1169 * the first successfully registered interface.
1170 */
1171 if (!adapter->registered_device_map)
1172 adapter->name = adapter->port[i].dev->name;
1173
559fb51b 1174 __set_bit(i, &adapter->registered_device_map);
8199d3a7
CL
1175 }
1176 }
1177 if (!adapter->registered_device_map) {
1178 CH_ERR("%s: could not register any net devices\n",
1179 pci_name(pdev));
1180 goto out_release_adapter_res;
1181 }
1182
1183 printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1184 bi->desc, adapter->params.chip_revision,
1185 adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1186 adapter->params.pci.speed, adapter->params.pci.width);
1187 return 0;
1188
1189 out_release_adapter_res:
1190 t1_free_sw_modules(adapter);
1191 out_free_dev:
1192 if (adapter) {
559fb51b 1193 if (adapter->regs) iounmap(adapter->regs);
8199d3a7 1194 for (i = bi->port_number - 1; i >= 0; --i)
559fb51b
SB
1195 if (adapter->port[i].dev) {
1196 cxgb_proc_cleanup(adapter, proc_root_driver);
1197 kfree(adapter->port[i].dev);
1198 }
8199d3a7
CL
1199 }
1200 pci_release_regions(pdev);
1201 out_disable_pdev:
1202 pci_disable_device(pdev);
1203 pci_set_drvdata(pdev, NULL);
1204 return err;
1205}
1206
/*
 * Soft-reset the adapter by bouncing it through PCI power-management
 * states: write 3 then 0 to the PM control/status register.
 * NOTE(review): presumably 3 = D3hot and 0 = D0, so the chip is reset
 * by the power-state transition — confirm against the T1 register map
 * for A_PCICFG_PM_CSR.  The write order is significant.
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
1212
1213static void __devexit remove_one(struct pci_dev *pdev)
1214{
1215 struct net_device *dev = pci_get_drvdata(pdev);
1216
1217 if (dev) {
1218 int i;
1219 struct adapter *adapter = dev->priv;
1220
1221 for_each_port(adapter, i)
1222 if (test_bit(i, &adapter->registered_device_map))
1223 unregister_netdev(adapter->port[i].dev);
1224
1225 t1_free_sw_modules(adapter);
1226 iounmap(adapter->regs);
1227 while (--i >= 0)
559fb51b
SB
1228 if (adapter->port[i].dev) {
1229 cxgb_proc_cleanup(adapter, proc_root_driver);
1230 kfree(adapter->port[i].dev);
1231 }
8199d3a7
CL
1232 pci_release_regions(pdev);
1233 pci_disable_device(pdev);
1234 pci_set_drvdata(pdev, NULL);
1235 t1_sw_reset(pdev);
1236 }
1237}
1238
/*
 * PCI driver glue: binds the device IDs in t1_pci_tbl to the
 * probe/remove callbacks defined above.
 */
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};
1245
1246static int __init t1_init_module(void)
1247{
29917620 1248 return pci_register_driver(&driver);
8199d3a7
CL
1249}
1250
1251static void __exit t1_cleanup_module(void)
1252{
1253 pci_unregister_driver(&driver);
1254}
1255
/* Register the module's load/unload handlers with the kernel. */
module_init(t1_init_module);
module_exit(t1_cleanup_module);