/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.4.31"
#define DRV_MODULE_RELDATE	"January 19, 2006"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);

	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
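/* Note on the arithmetic above: tx_prod and tx_cons are free-running
 * indices and TX_RING_IDX() masks them with MAX_TX_DESC_CNT (255 for a
 * 256-entry ring).  The unsigned subtraction wraps, so when prod has
 * wrapped past cons the result exceeds MAX_TX_DESC_CNT and is
 * re-masked; the extra "- 1" appears to account for the ring using 256
 * indices while keeping one slot unused.  Worked example on a
 * 256-entry ring: prod index 5, cons index 250 gives
 * diff = 5 - 250 = 0xffffff0b, masked to 11, minus 1 = 10, so
 * tx_avail = tx_ring_size - 10.
 */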
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
	REG_WR(bp, BNX2_CTX_DATA, val);
}
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	} else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	return ret;
}
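/* The MDIO command word written to BNX2_EMAC_MDIO_COMM above follows
 * IEEE 802.3 clause 22 framing: bits 25:21 hold the PHY address, bits
 * 20:16 the register number, and bits 15:0 carry the data (written on
 * a write, returned on a read).  Example: reading MII_BMSR (register
 * 1) from PHY address 1 issues (1 << 21) | (1 << 16) | READ | DISEXT |
 * START_BUSY, and the poll loop waits for the hardware to clear
 * START_BUSY before extracting the 16 data bits from the same
 * register.
 */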
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY))
			break;
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
	}

	return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
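/* intr_sem is the driver's interrupt gate: bnx2_disable_int_sync()
 * bumps it before masking the interrupt and waiting out any handler
 * already running, and the ISRs bail out while the count is non-zero.
 * bnx2_netif_start() only re-enables interrupts once its
 * atomic_dec_and_test() brings the count back to zero, so nested
 * stop/start pairs balance correctly.
 */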
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	if (bp->stats_blk) {
		pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
				    bp->stats_blk, bp->stats_blk_mapping);
		bp->stats_blk = NULL;
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, sizeof(struct status_block),
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	if (bp->rx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct rx_bd) * RX_DESC_CNT,
				    bp->rx_desc_ring, bp->rx_desc_mapping);
		bp->rx_desc_ring = NULL;
	}
	kfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT);
	bp->rx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct rx_bd) *
						RX_DESC_CNT,
						&bp->rx_desc_mapping);
	if (bp->rx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->status_blk = pci_alloc_consistent(bp->pdev,
					      sizeof(struct status_block),
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, sizeof(struct status_block));

	bp->stats_blk = pci_alloc_consistent(bp->pdev,
					     sizeof(struct statistics_block),
					     &bp->stats_blk_mapping);
	if (bp->stats_blk == NULL)
		goto alloc_mem_err;

	memset(bp->stats_blk, 0, sizeof(struct statistics_block));

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL)
			bp->flow_ctrl = bp->req_flow_ctrl;
		return;
	}

	if (bp->duplex != DUPLEX_FULL)
		return;

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
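/* Summary of the Table 28B-3 resolution implemented above, with
 * CAP = symmetric PAUSE bit and ASYM = asymmetric PAUSE bit:
 *
 *   local CAP      + remote CAP               -> TX and RX pause
 *   local CAP|ASYM + remote ASYM (no CAP)     -> RX pause only
 *   local ASYM (no CAP) + remote CAP|ASYM     -> TX pause only
 *   any other combination                     -> no pause
 */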
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	if (!(bmcr & BMCR_ANENABLE))
		return 0;

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
		if (common & ADVERTISE_1000XFULL)
			bp->duplex = DUPLEX_FULL;
		else
			bp->duplex = DUPLEX_HALF;
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100)
			bp->line_speed = SPEED_100;
		else
			bp->line_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			bp->duplex = DUPLEX_FULL;
		else
			bp->duplex = DUPLEX_HALF;
	}

	return 0;
}
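/* Why "remote_adv >> 2" above: in MII_STAT1000 the link partner's
 * 1000BASE-T ability bits (bits 11:10) sit two positions above the
 * corresponding local advertisement bits (9:8) in MII_CTRL1000, so
 * shifting the status word right by two lines the partner's abilities
 * up with our own ADVERTISE_1000FULL/ADVERTISE_1000HALF masks.
 */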
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) == CHIP_NUM_5708) {
				val |= BNX2_EMAC_MODE_PORT_MII_10;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET))
			break;
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate, which is very common
			 * in blade servers.  Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions.  Autonegotiation
			 * involves exchanging base pages plus 3 next
			 * pages and normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
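/* The register 0x18/0x17/0x15/0x1c accesses above go through Broadcom
 * PHY shadow/expansion registers rather than standard MII registers:
 * the first write selects a shadow bank and the following
 * read-modify-write updates it.  The exact bank layout is
 * Broadcom-specific (taken from vendor programming notes), so the
 * magic constants should be treated as opaque.  "ethernet@wirespeed"
 * lets the PHY fall back to a lower speed when the cable cannot
 * support the advertised rate.
 */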
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
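/* Driver/firmware handshake used above: each message written to the
 * BNX2_DRV_MB shared-memory mailbox carries an incrementing sequence
 * number in its low bits, and the bootcode echoes that sequence into
 * BNX2_FW_MB once it has processed the message.  The loop compares
 * the two until they match or the FW_ACK_TIME_OUT_MS budget runs out,
 * and a timeout is reported back to the firmware as a FW_TIMEOUT code.
 */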
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
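/* Example of the packing above: for MAC 00:10:18:0a:0b:0c the chip
 * sees MATCH0 = 0x00000010 (bytes 0-1 in the low 16 bits) and
 * MATCH1 = 0x180a0b0c (bytes 2-5, most significant first), i.e. the
 * address is split big-endian across the two match registers.
 */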
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[index];
	unsigned long align;

	skb = dev_alloc_skb(bp->rx_buf_size);
	if (skb == NULL)
		return -ENOMEM;

	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	skb->dev = bp->dev;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
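/* The skb_reserve() above nudges the packet buffer up to the next
 * 8-byte boundary, since the chip expects (or at least performs best
 * with) 8-byte-aligned receive buffer addresses.  rx_prod_bseq keeps
 * a running byte total of posted buffers; it is later written to the
 * receive mailbox so the hardware can track how much buffer space the
 * driver has made available.
 */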
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_shinfo(skb)->tso_size) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb_irq(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;

	if (unlikely(netif_queue_stopped(bp->dev))) {
		spin_lock(&bp->tx_lock);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {

			netif_wake_queue(bp->dev);
		}
		spin_unlock(&bp->tx_lock);
	}
}
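/* The queue-wake test above runs twice on purpose: the lockless
 * netif_queue_stopped() check keeps the common case cheap, and the
 * re-check under tx_lock closes the race with the transmit path,
 * which stops the queue under the same lock when the ring fills.
 * Waking only when more than MAX_SKB_FRAGS descriptors are free
 * guarantees the next packet's worst-case BD usage will fit.
 */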
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[cons];
	prod_bd = &bp->rx_desc_ring[prod];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(htons(skb->protocol) != 0x8100)) {

			dev_kfree_skb_irq(skb);
			goto next_rx;
		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	return rx_pkt;
}
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}

static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
	    bp->link_up)
		return 1;

	return 0;
}

static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = ~crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}
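	/* Hash layout used above: the low byte of the inverted
	 * little-endian CRC-32 of the MAC address selects one of 256
	 * filter bits, spread across NUM_MC_HASH_REGISTERS (8) 32-bit
	 * registers; bits 7:5 pick the register and bits 4:0 the bit
	 * within it.  Example: crc = 0xffffff53 inverts to bit index
	 * 0xac, i.e. register 5, bit 12.
	 */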
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
static void
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->text[j]);
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);
}
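/* Each firmware section above is copied into the processor's
 * scratchpad through the indirect register window, after translating
 * the link-time MIPS addresses (relative to mips_view_base,
 * 0x08000000) into scratchpad offsets.  The same routine is reused
 * below for every on-chip processor; only the cpu_reg block of
 * register addresses and the fw_info section pointers change.
 */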
static void
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
	load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_RXP_b06FwText;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TXP_b06FwText;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TPAT_b06FwText;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_COM_b06FwText;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);
}
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}
static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}
static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM page to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}
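	/* Illustrative example (values assumed, not part of the driver):
	 * for an AT45DB-style buffered part with 264-byte pages and 9
	 * byte-address bits per page, a linear offset of 1000 maps to
	 * page 3, byte 208:
	 *
	 *	((1000 / 264) << 9) + (1000 % 264) = (3 << 9) + 208 = 1744
	 *
	 * The real page_size/page_bits come from the flash_table entry
	 * selected in bnx2_init_nvram().
	 */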
	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);

			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
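/* Illustrative note (values assumed for the example): a strap reading
 * whose masked bits equal 0x04000001 under FLASH_STRAP_MASK would match
 * the Saifun SA25F010 entry in flash_table, leaving bp->flash_info
 * pointing at the "Non-buffered flash (128kB)" spec and its page
 * geometry.
 */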
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
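/* Worked example (illustrative): bnx2_nvram_read(bp, 5, buf, 6) issues
 * two dword reads.  The first, at aligned offset 4 with pre_len =
 * 4 - (5 & 3) = 3, supplies bytes 5-7 (copied from buf + 1).  The
 * remaining length 3 is rounded up to one dword with extra = 1, so the
 * dword at offset 8 is read with BNX2_NVM_COMMAND_LAST and only
 * 4 - extra = 3 of its bytes are copied, completing the 6 requested
 * bytes.
 */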
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4];
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
				end, 4))) {
				return rc;
			}
		}
	}

	if (align_start || align_end) {
		buf = kmalloc(len32, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(buf, start, 4);
		}
		if (align_end) {
			memcpy(buf + len32 - 4, end, 4);
		}
		memcpy(buf + align_start, data_buf, buf_size);
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;
		u8 flash_buffer[264];

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffered flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx2_enable_nvram_write(bp);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	if (align_start || align_end)
		kfree(buf);
	return rc;
}
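/* Worked example (illustrative, assuming a non-buffered part with
 * 256-byte pages): writing 8 aligned bytes at offset 260 gives
 * page_start = 256, page_end = 512, data_start = 260, data_end = 268.
 * One iteration reads the whole page into flash_buffer, erases it, then
 * rewrites bytes 256-259 from the saved copy, 260-267 from the caller's
 * buffer, and 268-511 from the saved copy, after which written equals
 * len32 and the loop exits.
 */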
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximately 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	bnx2_init_cpus(bp);
	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
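	/* Illustrative example: for MAC address 00:10:18:0a:0b:0c the
	 * seed is 0x00 + (0x10 << 8) + (0x18 << 16) + 0x0a +
	 * (0x0b << 8) + (0x0c << 16) = 0x181000 + 0x0c0b0a = 0x241b0a.
	 * Any per-NIC value works; it only needs to decorrelate the
	 * backoff timers of NICs sharing a collision domain.
	 */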
	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
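	/* Illustrative example (assuming MAX_ETHERNET_PACKET_SIZE is the
	 * standard 1514): the default 1500-byte MTU gives val =
	 * 1500 + 14 + 4 = 1518, which does not exceed 1514 + 4, so the
	 * jumbo bit stays clear; an MTU of 9000 gives 9018 and sets
	 * BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA.
	 */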
	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	return rc;
}
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;
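	/* Illustrative example: with a 1500-byte MTU,
	 * rx_buf_use_size = 1500 + 14 (Ethernet header) + bp->rx_offset
	 * + 8 (4 bytes CRC plus 4 bytes VLAN tag); rx_buf_size then pads
	 * a further 8 bytes so the hardware DMA target can be aligned.
	 */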
	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	rxbd = &bp->rx_desc_ring[0];
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		rxbd->rx_bd_len = bp->rx_buf_use_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
	}

	rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff;

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_any(skb);
		i += j + 1;
	}
}
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < RX_DESC_CNT; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb_any(skb);
	}
}
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	bnx2_init_chip(bp);
	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}

static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	bnx2_init_phy(bp);
	bnx2_set_link(bp);
	return 0;
}
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;

	static const struct {
		u16   offset;
		u16   flags;
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {

			REG_WR_IND(bp, start + offset, test_pattern[i]);

			if (REG_RD_IND(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}
static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static const struct {
		u32   offset;
		u32   len;
	} mem_tbl[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	};

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	u32 val;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = 1514;
	skb = dev_alloc_skb(pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	udelay(100);

	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(skb);

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
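/* The return value is a bitmask, so a hypothetical caller can tell the
 * two failures apart:
 *
 *	rc = bnx2_test_loopback(bp);
 *	if (rc & BNX2_MAC_LOOPBACK_FAILED)	... MAC loopback failed
 *	if (rc & BNX2_PHY_LOOPBACK_FAILED)	... PHY loopback failed
 *
 * rc == 3 (BNX2_LOOPBACK_FAILED) means both tests failed; ethtool
 * self-test reports this value in buf[2].
 */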
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
	u32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
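/* Note on CRC32_RESIDUAL: each 0x100-byte block stores its CRC32 in its
 * last 4 bytes.  Running a reflected (little-endian) CRC32 over the data
 * plus its appended CRC yields the constant residual 0xdebb20e3
 * regardless of the data, which is why the checks above compare
 * ether_crc_le() of the whole block against CRC32_RESIDUAL instead of
 * recomputing and comparing stored checksums.
 */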
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
	/* BMSR latches link-down events; read twice to get the current
	 * link state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u32 val;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {

		spin_lock(&bp->phy_lock);
		if (bp->serdes_an_pending) {
			bp->serdes_an_pending--;
		}
		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bp->current_interval = bp->timer_interval;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);

			if (bmcr & BMCR_ANENABLE) {
				u32 phy1, phy2;

				bnx2_write_phy(bp, 0x1c, 0x7c00);
				bnx2_read_phy(bp, 0x1c, &phy1);

				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);
				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);

				if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
					!(phy2 & 0x20)) {	/* no CONFIG */

					bmcr &= ~BMCR_ANENABLE;
					bmcr |= BMCR_SPEED1000 |
						BMCR_FULLDPLX;
					bnx2_write_phy(bp, MII_BMCR, bmcr);
					bp->phy_flags |=
						PHY_PARALLEL_DETECT_FLAG;
				}
			}
		}
		else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
			(bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
			u32 phy2;

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			if (phy2 & 0x20) {
				u32 bmcr;

				bnx2_read_phy(bp, MII_BMCR, &bmcr);
				bmcr |= BMCR_ANENABLE;
				bnx2_write_phy(bp, MII_BMCR, bmcr);

				bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
			}
		}
		else
			bp->current_interval = bp->timer_interval;

		spin_unlock(&bp->phy_lock);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
	    !disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					 dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					 SA_SHIRQ, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
				 dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
						 SA_SHIRQ, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}

static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}

/* Called with rtnl_lock */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
/* Called with dev->xmit_lock.
 * hard_start_xmit is pseudo-lockless - a lock is only required when
 * the tx queue is full. This way, we get the benefit of lockless
 * operations most of the time without the complexities to handle
 * netif_stop_queue/wake_queue race conditions.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_HW) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#ifdef BCM_TSO
	if ((mss = skb_shinfo(skb)->tso_size) &&
		(skb->len > (bp->dev->mtu + ETH_HLEN))) {
		u32 tcp_opt_len, ip_tcp_len;

		if (skb_header_cloned(skb) &&
			pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = 0;
		if (skb->h.th->doff > 5) {
			tcp_opt_len = (skb->h.th->doff - 5) << 2;
		}
		ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);

		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
		skb->h.th->check =
			~csum_tcpudp_magic(skb->nh.iph->saddr,
					   skb->nh.iph->daddr,
					   0, IPPROTO_TCP, 0);

		if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
			vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
				(tcp_opt_len >> 2)) << 8;
		}
	}
	else
#endif
	{
		mss = 0;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
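	/* Illustrative example: a 100-byte head fragment of an LSO skb
	 * with mss = 1460 packs into tx_bd_mss_nbytes as
	 * 100 | (1460 << 16) = 0x05b40064; non-LSO packets leave mss at
	 * 0, so the upper 16 bits stay clear.
	 */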
	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		spin_lock(&bp->tx_lock);
		netif_stop_queue(dev);

		if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
			netif_wake_queue(dev);
		spin_unlock(&bp->tx_lock);
	}

	return NETDEV_TX_OK;
}
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
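/* Illustrative example: a hardware counter with ctr_hi = 0x1 and
 * ctr_lo = 0x23 yields 0x100000023 via GET_NET_STATS64 on 64-bit
 * kernels.  On 32-bit kernels GET_NET_STATS32 simply returns ctr_lo,
 * dropping the high word to fit the unsigned long stats fields.
 */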
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;

		cmd->port = PORT_FIBRE;
	}
	else {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

		cmd->port = PORT_TP;
	}

	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			return -EINVAL;
		}
		else {
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000) ||
				(cmd->duplex != DUPLEX_FULL)) {
				return -EINVAL;
			}
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}
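/* Illustrative example: bp->fw_ver = 0x01040800 renders as "1.4.8".
 * Each byte is converted with + '0', so this scheme only displays
 * correctly while every version component is a single decimal digit.
 */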
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;
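	/* Illustrative example: requesting 5000 usecs for rx_ticks is
	 * clamped to the 10-bit hardware maximum 0x3ff (1023), and a
	 * stats_block_coalesce_usecs of 0x12345 is masked to 0x12300,
	 * since the low 8 bits of BNX2_HC_STATS_TICKS are not used.
	 */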
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
#define BNX2_NUM_STATS 45

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
};
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5019 bnx2_self_test_count(struct net_device *dev)
5021 return BNX2_NUM_TESTS;
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                if (!netif_running(bp->dev)) {
                        bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                }
                else {
                        bnx2_init_nic(bp);
                        bnx2_netif_start(bp);
                }

                /* wait for link up */
                msleep_interruptible(3000);
                if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
                        msleep_interruptible(4000);
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
}

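/* Usage note: userspace reaches bnx2_self_test() through the ETHTOOL_TEST
 * ioctl, e.g. "ethtool -t eth0 offline".  Results come back in buf[] in
 * the order of bnx2_tests_str_arr above; a nonzero entry marks a failed
 * test.  An offline run briefly stops the interface and resets the chip.
 */
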
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, bnx2_stats_str_arr,
                        sizeof(bnx2_stats_str_arr));
                break;
        case ETH_SS_TEST:
                memcpy(buf, bnx2_tests_str_arr,
                        sizeof(bnx2_tests_str_arr));
                break;
        }
}

static int
bnx2_get_stats_count(struct net_device *dev)
{
        return BNX2_NUM_STATS;
}

static void
bnx2_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 *hw_stats = (u32 *) bp->stats_blk;
        u8 *stats_len_arr = NULL;

        if (hw_stats == NULL) {
                memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
                return;
        }

        if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                stats_len_arr = bnx2_5706_stats_len_arr;
        else
                stats_len_arr = bnx2_5708_stats_len_arr;

        for (i = 0; i < BNX2_NUM_STATS; i++) {
                if (stats_len_arr[i] == 0) {
                        /* skip this counter */
                        buf[i] = 0;
                        continue;
                }
                if (stats_len_arr[i] == 4) {
                        /* 4-byte counter */
                        buf[i] = (u64)
                                *(hw_stats + bnx2_stats_offset_arr[i]);
                        continue;
                }
                /* 8-byte counter */
                buf[i] = (((u64) *(hw_stats +
                                bnx2_stats_offset_arr[i])) << 32) +
                        *(hw_stats + bnx2_stats_offset_arr[i] + 1);
        }
}

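/* Usage sketch (hypothetical userspace caller, not part of this driver),
 * equivalent to what "ethtool -S eth0" does via the SIOCETHTOOL ioctl:
 *
 *      struct ethtool_stats *st = malloc(sizeof(*st) +
 *                                        BNX2_NUM_STATS * sizeof(__u64));
 *      st->cmd = ETHTOOL_GSTATS;
 *      ifr.ifr_data = (caddr_t) st;
 *      ioctl(sock, SIOCETHTOOL, &ifr);
 *
 * For an 8-byte counter the two halves sit in adjacent 32-bit words, hi
 * word first, so hi = 1, lo = 2 reads back as (1ULL << 32) + 2.
 */
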
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 save;

        if (data == 0)
                data = 2;

        save = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

        for (i = 0; i < (data * 2); i++) {
                if ((i % 2) == 0) {
                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
                }
                else {
                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
                                BNX2_EMAC_LED_1000MB_OVERRIDE |
                                BNX2_EMAC_LED_100MB_OVERRIDE |
                                BNX2_EMAC_LED_10MB_OVERRIDE |
                                BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
                                BNX2_EMAC_LED_TRAFFIC);
                }
                msleep_interruptible(500);
                if (signal_pending(current))
                        break;
        }
        REG_WR(bp, BNX2_EMAC_LED, 0);
        REG_WR(bp, BNX2_MISC_CFG, save);
        return 0;
}

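/* Usage note: invoked via the ETHTOOL_PHYS_ID ioctl ("ethtool -p eth0 5"
 * blinks for about 5 seconds).  Each pass through the loop toggles the
 * LED override every 500 ms, hence the (data * 2) bound, and a pending
 * signal (e.g. ^C on the ethtool process) stops the blinking early.
 */
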
static struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .get_rx_csum            = bnx2_get_rx_csum,
        .set_rx_csum            = bnx2_set_rx_csum,
        .get_tx_csum            = ethtool_op_get_tx_csum,
        .set_tx_csum            = ethtool_op_set_tx_csum,
        .get_sg                 = ethtool_op_get_sg,
        .set_sg                 = ethtool_op_set_sg,
#ifdef BCM_TSO
        .get_tso                = ethtool_op_get_tso,
        .set_tso                = ethtool_op_set_tso,
#endif
        .self_test_count        = bnx2_self_test_count,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .phys_id                = bnx2_phys_id,
        .get_stats_count        = bnx2_get_stats_count,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_perm_addr          = ethtool_op_get_perm_addr,
};

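/* All ethtool requests on this netdev dispatch through the table above.
 * The generic ethtool_op_* helpers cover feature bits that need no
 * hardware access; the bnx2_* entries touch chip registers or NVRAM.
 */
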
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2 *bp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = bp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&bp->phy_lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&bp->phy_lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}

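/* Usage sketch (hypothetical, mirrors what mii-tool does): the legacy MII
 * ioctls carry a struct mii_ioctl_data inside the ifreq:
 *
 *      struct ifreq ifr = { .ifr_name = "eth0" };
 *      struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr.ifr_data;
 *      ioctl(sock, SIOCGMIIPHY, &ifr);
 *      mii->reg_num = MII_BMSR;
 *      ioctl(sock, SIOCGMIIREG, &ifr);
 *
 * reg_num is masked to the 5-bit clause-22 register space, and writes
 * (SIOCSMIIREG) require CAP_NET_ADMIN.
 */
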
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;
        struct bnx2 *bp = netdev_priv(dev);

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        if (netif_running(dev))
                bnx2_set_mac_addr(bp);

        return 0;
}

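/* Usage note: reached via SIOCSIFHWADDR, e.g.
 * "ip link set dev eth0 address 02:00:00:00:00:01".  The new address is
 * written to the MAC registers immediately only when the interface is
 * up; otherwise it is programmed on the next open.
 */
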
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
        struct bnx2 *bp = netdev_priv(dev);

        if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
                ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
                return -EINVAL;

        dev->mtu = new_mtu;
        if (netif_running(dev)) {
                bnx2_netif_stop(bp);

                bnx2_init_nic(bp);

                bnx2_netif_start(bp);
        }
        return 0;
}

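/* Usage note: reached via "ip link set dev eth0 mtu 9000".  The bounds
 * check is on the whole frame (MTU plus the 14-byte Ethernet header), so
 * assuming MAX_ETHERNET_JUMBO_PACKET_SIZE is 9022, matching the
 * 9022-octet stats buckets above, the largest accepted MTU is 9008.
 */
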
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        disable_irq(bp->pdev->irq);
        bnx2_interrupt(bp->pdev->irq, dev, NULL);
        enable_irq(bp->pdev->irq);
}
#endif

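/* Netpoll hook: netpoll clients such as netconsole call this to run the
 * interrupt handler synchronously, with the IRQ line masked, so that
 * the rings can still be drained when normal interrupt delivery is
 * unavailable (e.g. while printing an oops).
 */
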
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
        struct bnx2 *bp;
        unsigned long mem_len;
        int rc;
        u32 reg;

        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
        bp = netdev_priv(dev);

        bp->flags = 0;
        bp->phy_flags = 0;

        /* enable device (incl. PCI PM wakeup), and bus-mastering */
        rc = pci_enable_device(pdev);
        if (rc) {
                printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
                goto err_out;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "Cannot find PCI device base address, "
                                    "aborting.\n");
                rc = -ENODEV;
                goto err_out_disable;
        }

        rc = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (rc) {
                printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
                goto err_out_disable;
        }

        pci_set_master(pdev);

        bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (bp->pm_cap == 0) {
                printk(KERN_ERR PFX "Cannot find power management capability, "
                                    "aborting.\n");
                rc = -EIO;
                goto err_out_release;
        }

        bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
        if (bp->pcix_cap == 0) {
                printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
                rc = -EIO;
                goto err_out_release;
        }

        if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
                bp->flags |= USING_DAC_FLAG;
                if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
                        printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
                                            "failed, aborting.\n");
                        rc = -EIO;
                        goto err_out_release;
                }
        }
        else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
                printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
                rc = -EIO;
                goto err_out_release;
        }

        bp->dev = dev;
        bp->pdev = pdev;

        spin_lock_init(&bp->phy_lock);
        spin_lock_init(&bp->tx_lock);
        INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);

        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
        mem_len = MB_GET_CID_ADDR(17);
        dev->mem_end = dev->mem_start + mem_len;
        dev->irq = pdev->irq;

        bp->regview = ioremap_nocache(dev->base_addr, mem_len);

        if (!bp->regview) {
                printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
                rc = -ENOMEM;
                goto err_out_release;
        }

        /* Configure byte swap and enable write to the reg_window registers.
         * Rely on CPU to do target byte swapping on big endian systems.
         * The chip's target access swapping will not swap all accesses.
         */
        pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
                               BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                               BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

        bnx2_set_power_state(bp, PCI_D0);

        bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

        /* Get bus information. */
        reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
                u32 clkreg;

                bp->flags |= PCIX_FLAG;

                clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
                        bp->bus_speed_mhz = 133;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
                        bp->bus_speed_mhz = 100;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
                        bp->bus_speed_mhz = 66;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
                        bp->bus_speed_mhz = 50;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
                        bp->bus_speed_mhz = 33;
                        break;
                }
        }
        else {
                if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
                        bp->bus_speed_mhz = 66;
                else
                        bp->bus_speed_mhz = 33;
        }

        if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
                bp->flags |= PCI_32BIT_FLAG;

        /* 5706A0 may falsely detect SERR and PERR. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                reg = REG_RD(bp, PCI_COMMAND);
                reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
                REG_WR(bp, PCI_COMMAND, reg);
        }
        else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
                !(bp->flags & PCIX_FLAG)) {

                printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
                                    "aborting.\n");
                rc = -EPERM;
                goto err_out_unmap;
        }

        bnx2_init_nvram(bp);

        reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

        if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
            BNX2_SHM_HDR_SIGNATURE_SIG)
                bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
        else
                bp->shmem_base = HOST_VIEW_SHMEM_BASE;

        /* Get the permanent MAC address.  First we need to make sure the
         * firmware is actually running.
         */
        reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

        if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
            BNX2_DEV_INFO_SIGNATURE_MAGIC) {
                printk(KERN_ERR PFX "Firmware not running, aborting.\n");
                rc = -ENODEV;
                goto err_out_unmap;
        }

        bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

        reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
        bp->mac_addr[0] = (u8) (reg >> 8);
        bp->mac_addr[1] = (u8) reg;

        reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
        bp->mac_addr[2] = (u8) (reg >> 24);
        bp->mac_addr[3] = (u8) (reg >> 16);
        bp->mac_addr[4] = (u8) (reg >> 8);
        bp->mac_addr[5] = (u8) reg;

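        /* The address is packed big-endian across two shmem words:
         * MAC_UPPER holds bytes 0-1 in its low 16 bits and MAC_LOWER
         * holds bytes 2-5, so UPPER = 0x0000000a, LOWER = 0x0b0c0d0e
         * yields 00:0a:0b:0c:0d:0e.
         */
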
        bp->tx_ring_size = MAX_TX_DESC_CNT;
        bp->rx_ring_size = 100;

        bp->rx_csum = 1;

        bp->rx_offset = sizeof(struct l2_fhdr) + 2;

        bp->tx_quick_cons_trip_int = 20;
        bp->tx_quick_cons_trip = 20;
        bp->tx_ticks_int = 80;
        bp->tx_ticks = 80;

        bp->rx_quick_cons_trip_int = 6;
        bp->rx_quick_cons_trip = 6;
        bp->rx_ticks_int = 18;
        bp->rx_ticks = 18;

        bp->stats_ticks = 1000000 & 0xffff00;

        bp->timer_interval = HZ;
        bp->current_interval = HZ;

        bp->phy_addr = 1;

        /* Disable WOL support if we are running on a SERDES chip. */
        if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
                bp->phy_flags |= PHY_SERDES_FLAG;
                bp->flags |= NO_WOL_FLAG;
                if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        bp->phy_addr = 2;
                        reg = REG_RD_IND(bp, bp->shmem_base +
                                         BNX2_SHARED_HW_CFG_CONFIG);
                        if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
                                bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
                }
        }

        if (CHIP_NUM(bp) == CHIP_NUM_5708)
                bp->flags |= NO_WOL_FLAG;

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                bp->tx_quick_cons_trip_int =
                        bp->tx_quick_cons_trip;
                bp->tx_ticks_int = bp->tx_ticks;
                bp->rx_quick_cons_trip_int =
                        bp->rx_quick_cons_trip;
                bp->rx_ticks_int = bp->rx_ticks;
                bp->comp_prod_trip_int = bp->comp_prod_trip;
                bp->com_ticks_int = bp->com_ticks;
                bp->cmd_ticks_int = bp->cmd_ticks;
        }

        bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
        bp->req_line_speed = 0;
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

                reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
                reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
                if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
                        bp->autoneg = 0;
                        bp->req_line_speed = bp->line_speed = SPEED_1000;
                        bp->req_duplex = DUPLEX_FULL;
                }
        }
        else {
                bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
        }

        bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

        init_timer(&bp->timer);
        bp->timer.expires = RUN_AT(bp->timer_interval);
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = bnx2_timer;

        return 0;

err_out_unmap:
        if (bp->regview) {
                iounmap(bp->regview);
                bp->regview = NULL;
        }

err_out_release:
        pci_release_regions(pdev);

err_out_disable:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

err_out:
        return rc;
}

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int version_printed = 0;
        struct net_device *dev = NULL;
        struct bnx2 *bp;
        int rc, i;

        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        /* dev zeroed in alloc_etherdev */
        dev = alloc_etherdev(sizeof(*bp));

        if (!dev)
                return -ENOMEM;

        rc = bnx2_init_board(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        dev->open = bnx2_open;
        dev->hard_start_xmit = bnx2_start_xmit;
        dev->stop = bnx2_close;
        dev->get_stats = bnx2_get_stats;
        dev->set_multicast_list = bnx2_set_rx_mode;
        dev->do_ioctl = bnx2_ioctl;
        dev->set_mac_address = bnx2_change_mac_addr;
        dev->change_mtu = bnx2_change_mtu;
        dev->tx_timeout = bnx2_tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
        dev->vlan_rx_register = bnx2_vlan_rx_register;
        dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
        dev->poll = bnx2_poll;
        dev->ethtool_ops = &bnx2_ethtool_ops;
        dev->weight = 64;

        bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
        dev->poll_controller = poll_bnx2;
#endif

        if ((rc = register_netdev(dev))) {
                printk(KERN_ERR PFX "Cannot register net device\n");
                if (bp->regview)
                        iounmap(bp->regview);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
                free_netdev(dev);
                return rc;
        }

        pci_set_drvdata(pdev, dev);

        memcpy(dev->dev_addr, bp->mac_addr, 6);
        memcpy(dev->perm_addr, bp->mac_addr, 6);
        bp->name = board_info[ent->driver_data].name;
        printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
                "IRQ %d, ",
                dev->name,
                bp->name,
                ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
                ((CHIP_ID(bp) & 0x0ff0) >> 4),
                ((bp->flags & PCIX_FLAG) ? "-X" : ""),
                ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
                bp->bus_speed_mhz,
                dev->base_addr,
                bp->pdev->irq);

        printk("node addr ");
        for (i = 0; i < 6; i++)
                printk("%2.2x", dev->dev_addr[i]);
        printk("\n");

        dev->features |= NETIF_F_SG;
        if (bp->flags & USING_DAC_FLAG)
                dev->features |= NETIF_F_HIGHDMA;
        dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
        dev->features |= NETIF_F_TSO;
#endif

        netif_carrier_off(bp->dev);

        return 0;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        flush_scheduled_work();

        unregister_netdev(dev);

        if (bp->regview)
                iounmap(bp->regview);

        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        if (!netif_running(dev))
                return 0;

        bnx2_netif_stop(bp);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        bnx2_free_skbs(bp);
        bnx2_set_power_state(bp, pci_choose_state(pdev, state));
        return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        if (!netif_running(dev))
                return 0;

        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        bnx2_init_nic(bp);
        bnx2_netif_start(bp);
        return 0;
}

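/* Power-management flow: suspend quiesces the rings, tells the bootcode
 * how to park the chip (full unload, or a WoL/no-WoL suspend depending
 * on bp->wol and NO_WOL_FLAG), and enters the PCI sleep state chosen by
 * the core; resume restores D0 and rebuilds the rings with
 * bnx2_init_nic() before restarting the netif path.
 */
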
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
};

static int __init bnx2_init(void)
{
        return pci_module_init(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);