]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/ixgbe/ixgbe_common.c
ixgbe: DCB set PFC high and low water marks per data sheet specs
[net-next-2.6.git] / drivers / net / ixgbe / ixgbe_common.c
CommitLineData
9a799d71
AK
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
8c47eaa7 4 Copyright(c) 1999 - 2010 Intel Corporation.
9a799d71
AK
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
9a799d71
AK
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30#include <linux/sched.h>
ccffad25 31#include <linux/netdevice.h>
9a799d71 32
11afc1b1 33#include "ixgbe.h"
9a799d71
AK
34#include "ixgbe_common.h"
35#include "ixgbe_phy.h"
36
c44ade9e 37static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
9a799d71
AK
38static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
39static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
c44ade9e
JB
40static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
41static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
42static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
43 u16 count);
44static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
45static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
9a799d71
AK
48static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
49
c44ade9e
JB
50static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
51static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
9a799d71 52static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
c44ade9e 53static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
7b25cdba 54static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
5d5b7c39 55static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
9a799d71
AK
56
/**
 *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware by filling the bus info structure and media type,
 *  clears all on chip counters, initializes receive address registers,
 *  multicast table, VLAN filter table, calls routine to set up link and
 *  flow control settings, and leaves transmit and receive units disabled
 *  and uninitialized.
 *
 *  Returns 0 unconditionally; failures of the individual ops are not
 *  propagated by this routine.
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	u32 ctrl_ext;

	/* Set the media type as reported by the MAC layer */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* Identify the PHY */
	hw->phy.ops.identify(hw);

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers (they are clear-on-read) */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable in the extended control register */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	/* Flush the posted write before continuing */
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control on packet buffer 0 */
	ixgbe_setup_fc(hw, 0);

	/* Clear adapter stopped flag so other code may touch the hardware */
	hw->adapter_stopped = false;

	return 0;
}
96
97/**
c44ade9e 98 * ixgbe_init_hw_generic - Generic hardware initialization
9a799d71
AK
99 * @hw: pointer to hardware structure
100 *
c44ade9e 101 * Initialize the hardware by resetting the hardware, filling the bus info
9a799d71
AK
102 * structure and media type, clears all on chip counters, initializes receive
103 * address registers, multicast table, VLAN filter table, calls routine to set
104 * up link and flow control settings, and leaves transmit and receive units
105 * disabled and uninitialized
106 **/
c44ade9e 107s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
9a799d71 108{
794caeb2
PWJ
109 s32 status;
110
9a799d71 111 /* Reset the hardware */
794caeb2 112 status = hw->mac.ops.reset_hw(hw);
9a799d71 113
794caeb2
PWJ
114 if (status == 0) {
115 /* Start the HW */
116 status = hw->mac.ops.start_hw(hw);
117 }
9a799d71 118
794caeb2 119 return status;
9a799d71
AK
120}
121
/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears all hardware statistics counters by reading them from the hardware
 *  Statistics counters are clear on read.  Every read below is performed
 *  purely for its side effect; the values are intentionally discarded.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* One MPC register per packet buffer (8) */
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level XON/XOFF flow control counters */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXONRXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);

	/* Per-priority XON/XOFF counters (8 priorities) */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
	}

	/* Receive packet-size histogram and good rx/tx counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	/* 64-bit octet counters are split into L/H register pairs */
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	/* Management counters */
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Transmit packet-size histogram counters */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters (16 queues) */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		IXGBE_READ_REG(hw, IXGBE_QBTC(i));
	}

	return 0;
}
199
200/**
c44ade9e
JB
201 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
202 * @hw: pointer to hardware structure
203 * @pba_num: stores the part number from the EEPROM
204 *
205 * Reads the part number from the EEPROM.
206 **/
207s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
208{
209 s32 ret_val;
210 u16 data;
211
212 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
213 if (ret_val) {
214 hw_dbg(hw, "NVM Read Error\n");
215 return ret_val;
216 }
217 *pba_num = (u32)(data << 16);
218
219 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
220 if (ret_val) {
221 hw_dbg(hw, "NVM Read Error\n");
222 return ret_val;
223 }
224 *pba_num |= data;
225
226 return 0;
227}
228
229/**
230 * ixgbe_get_mac_addr_generic - Generic get MAC address
9a799d71
AK
231 * @hw: pointer to hardware structure
232 * @mac_addr: Adapter MAC address
233 *
234 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
235 * A reset of the adapter must be performed prior to calling this function
236 * in order for the MAC address to have been loaded from the EEPROM into RAR0
237 **/
c44ade9e 238s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
9a799d71
AK
239{
240 u32 rar_high;
241 u32 rar_low;
242 u16 i;
243
244 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
245 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
246
247 for (i = 0; i < 4; i++)
248 mac_addr[i] = (u8)(rar_low >> (i*8));
249
250 for (i = 0; i < 2; i++)
251 mac_addr[i+4] = (u8)(rar_high >> (i*8));
252
253 return 0;
254}
255
11afc1b1
PW
256/**
257 * ixgbe_get_bus_info_generic - Generic set PCI bus info
258 * @hw: pointer to hardware structure
259 *
260 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
261 **/
262s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
263{
264 struct ixgbe_adapter *adapter = hw->back;
265 struct ixgbe_mac_info *mac = &hw->mac;
266 u16 link_status;
267
268 hw->bus.type = ixgbe_bus_type_pci_express;
269
270 /* Get the negotiated link width and speed from PCI config space */
271 pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
272 &link_status);
273
274 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
275 case IXGBE_PCI_LINK_WIDTH_1:
276 hw->bus.width = ixgbe_bus_width_pcie_x1;
277 break;
278 case IXGBE_PCI_LINK_WIDTH_2:
279 hw->bus.width = ixgbe_bus_width_pcie_x2;
280 break;
281 case IXGBE_PCI_LINK_WIDTH_4:
282 hw->bus.width = ixgbe_bus_width_pcie_x4;
283 break;
284 case IXGBE_PCI_LINK_WIDTH_8:
285 hw->bus.width = ixgbe_bus_width_pcie_x8;
286 break;
287 default:
288 hw->bus.width = ixgbe_bus_width_unknown;
289 break;
290 }
291
292 switch (link_status & IXGBE_PCI_LINK_SPEED) {
293 case IXGBE_PCI_LINK_SPEED_2500:
294 hw->bus.speed = ixgbe_bus_speed_2500;
295 break;
296 case IXGBE_PCI_LINK_SPEED_5000:
297 hw->bus.speed = ixgbe_bus_speed_5000;
298 break;
299 default:
300 hw->bus.speed = ixgbe_bus_speed_unknown;
301 break;
302 }
303
304 mac->ops.set_lan_id(hw);
305
306 return 0;
307}
308
309/**
310 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
311 * @hw: pointer to the HW structure
312 *
313 * Determines the LAN function id by reading memory-mapped registers
314 * and swaps the port value if requested.
315 **/
316void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
317{
318 struct ixgbe_bus_info *bus = &hw->bus;
319 u32 reg;
320
321 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
322 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
323 bus->lan_id = bus->func;
324
325 /* check for a port swap */
326 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
327 if (reg & IXGBE_FACTPS_LFS)
328 bus->func ^= 0x1;
329}
330
/**
 *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit and let in-flight receives drain */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	reg_val &= ~(IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
	IXGBE_WRITE_FLUSH(hw);
	msleep(2);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts (EICR is clear-on-read) */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
		}
	}

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	if (ixgbe_disable_pcie_master(hw) != 0)
		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");

	return 0;
}
384
385/**
c44ade9e 386 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
9a799d71
AK
387 * @hw: pointer to hardware structure
388 * @index: led number to turn on
389 **/
c44ade9e 390s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
9a799d71
AK
391{
392 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
393
394 /* To turn on the LED, set mode to ON. */
395 led_reg &= ~IXGBE_LED_MODE_MASK(index);
396 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
397 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3957d63d 398 IXGBE_WRITE_FLUSH(hw);
9a799d71
AK
399
400 return 0;
401}
402
403/**
c44ade9e 404 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
9a799d71
AK
405 * @hw: pointer to hardware structure
406 * @index: led number to turn off
407 **/
c44ade9e 408s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
9a799d71
AK
409{
410 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
411
412 /* To turn off the LED, set mode to OFF. */
413 led_reg &= ~IXGBE_LED_MODE_MASK(index);
414 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
415 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3957d63d 416 IXGBE_WRITE_FLUSH(hw);
9a799d71
AK
417
418 return 0;
419}
420
9a799d71 421/**
c44ade9e 422 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
9a799d71
AK
423 * @hw: pointer to hardware structure
424 *
425 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
426 * ixgbe_hw struct in order to set up EEPROM access.
427 **/
c44ade9e 428s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
9a799d71
AK
429{
430 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
431 u32 eec;
432 u16 eeprom_size;
433
434 if (eeprom->type == ixgbe_eeprom_uninitialized) {
435 eeprom->type = ixgbe_eeprom_none;
c44ade9e
JB
436 /* Set default semaphore delay to 10ms which is a well
437 * tested value */
438 eeprom->semaphore_delay = 10;
9a799d71
AK
439
440 /*
441 * Check for EEPROM present first.
442 * If not present leave as none
443 */
444 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
445 if (eec & IXGBE_EEC_PRES) {
446 eeprom->type = ixgbe_eeprom_spi;
447
448 /*
449 * SPI EEPROM is assumed here. This code would need to
450 * change if a future EEPROM is not SPI.
451 */
452 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
453 IXGBE_EEC_SIZE_SHIFT);
454 eeprom->word_size = 1 << (eeprom_size +
455 IXGBE_EEPROM_WORD_SIZE_SHIFT);
456 }
457
458 if (eec & IXGBE_EEC_ADDR_SIZE)
459 eeprom->address_bits = 16;
460 else
461 eeprom->address_bits = 8;
462 hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
463 "%d\n", eeprom->type, eeprom->word_size,
464 eeprom->address_bits);
465 }
466
467 return 0;
468}
469
11afc1b1
PW
/**
 *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @data: 16 bit word to be written to the EEPROM
 *
 *  Writes one word via the bit-bang SPI interface: acquire the EEPROM,
 *  wait until it is ready, issue WREN, then the WRITE opcode + address,
 *  then the byte-swapped data.
 *
 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 **/
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	hw->eeprom.ops.init_params(hw);

	/* Reject out-of-range word offsets */
	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		/* If the part never becomes ready, give it back and bail */
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		ixgbe_standby_eeprom(hw);

		/* Send the WRITE ENABLE command (8 bit opcode ) */
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
		                            IXGBE_EEPROM_OPCODE_BITS);

		ixgbe_standby_eeprom(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
			write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, write_opcode,
		                            IXGBE_EEPROM_OPCODE_BITS);
		/* Byte address = word offset * 2 */
		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
		                            hw->eeprom.address_bits);

		/* Send the data, byte-swapped to SPI wire order */
		data = (data >> 8) | (data << 8);
		ixgbe_shift_out_eeprom_bits(hw, data, 16);
		ixgbe_standby_eeprom(hw);

		/* Wait out the device's programming time */
		msleep(hw->eeprom.semaphore_delay);
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

out:
	return status;
}
536
/**
 *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @data: read 16 bit value from EEPROM
 *
 *  Reads 16 bit value from EEPROM through bit-bang method: acquire the
 *  EEPROM, wait until it is ready, issue the READ opcode + address, then
 *  clock in 16 data bits (byte-swapped from SPI wire order).
 **/
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                       u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;

	hw->eeprom.ops.init_params(hw);

	/* Reject out-of-range word offsets */
	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		/* If the part never becomes ready, give it back and bail */
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		ixgbe_standby_eeprom(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the READ command (opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
		                            IXGBE_EEPROM_OPCODE_BITS);
		/* Byte address = word offset * 2 */
		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
		                            hw->eeprom.address_bits);

		/* Read the data, byte-swapping from SPI wire order */
		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
		*data = (word_in >> 8) | (word_in << 8);

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

out:
	return status;
}
596
/**
 *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the EERD register: program
 *  the address plus the START bit, poll for DONE, then read the data
 *  field out of EERD.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	u32 eerd;
	s32 status;

	hw->eeprom.ops.init_params(hw);

	/* Reject out-of-range word offsets */
	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Word address in the upper bits plus the START bit */
	eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
	       IXGBE_EEPROM_RW_REG_START;

	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

	if (status == 0)
		/* Data field sits above the status bits */
		*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
		         IXGBE_EEPROM_RW_REG_DATA);
	else
		hw_dbg(hw, "Eeprom read timed out\n");

out:
	return status;
}
632
633/**
21ce849b 634 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
9a799d71 635 * @hw: pointer to hardware structure
21ce849b 636 * @ee_reg: EEPROM flag for polling
9a799d71 637 *
21ce849b
MC
638 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
639 * read or write is done respectively.
9a799d71 640 **/
5d5b7c39 641static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
9a799d71
AK
642{
643 u32 i;
644 u32 reg;
645 s32 status = IXGBE_ERR_EEPROM;
646
21ce849b
MC
647 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
648 if (ee_reg == IXGBE_NVM_POLL_READ)
649 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
650 else
651 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
652
653 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
9a799d71
AK
654 status = 0;
655 break;
656 }
657 udelay(5);
658 }
659 return status;
660}
661
c44ade9e
JB
/**
 *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *
 *  Prepares EEPROM for access using bit-bang method. This function should
 *  be called before issuing a command to the EEPROM.  The SW/FW semaphore
 *  is taken first, then hardware access is requested via EEC.REQ and
 *  granted via EEC.GNT.  On any failure everything acquired so far is
 *  released before returning.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u32 eec = 0;	/* zeroed so the GNT check below is safe on failure */
	u32 i;

	if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == 0) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

		/* Poll for the hardware grant */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
			if (eec & IXGBE_EEC_GNT)
				break;
			udelay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			hw_dbg(hw, "Could not acquire EEPROM grant\n");

			/* Also drop the semaphore taken above */
			ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}
	}

	/* Setup EEPROM for Read/Write */
	if (status == 0) {
		/* Clear CS and SK so the part starts in a known state */
		eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
		IXGBE_WRITE_FLUSH(hw);
		udelay(1);
	}
	return status;
}
713
9a799d71
AK
/**
 *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
 *  @hw: pointer to hardware structure
 *
 *  Sets the hardware semaphores so EEPROM access can occur for bit-bang
 *  method.  Two semaphores are taken in order: SMBI (between device
 *  drivers) and then SWESMBI (between software and firmware).  Returns
 *  IXGBE_ERR_EEPROM if either cannot be obtained within the timeout.
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout;
	u32 i;
	u32 swsm;

	/* Set timeout value based on size of EEPROM */
	timeout = hw->eeprom.word_size + 1;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = 0;
			break;
		}
		msleep(1);
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == 0) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			udelay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
			       "not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	return status;
}
778
779/**
780 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
781 * @hw: pointer to hardware structure
782 *
783 * This function clears hardware semaphore bits.
784 **/
785static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
786{
787 u32 swsm;
788
789 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
790
791 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
792 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
793 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
3957d63d 794 IXGBE_WRITE_FLUSH(hw);
9a799d71
AK
795}
796
c44ade9e
JB
797/**
798 * ixgbe_ready_eeprom - Polls for EEPROM ready
799 * @hw: pointer to hardware structure
800 **/
801static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
802{
803 s32 status = 0;
804 u16 i;
805 u8 spi_stat_reg;
806
807 /*
808 * Read "Status Register" repeatedly until the LSB is cleared. The
809 * EEPROM will signal that the command has been completed by clearing
810 * bit 0 of the internal status register. If it's not cleared within
811 * 5 milliseconds, then error out.
812 */
813 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
814 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
815 IXGBE_EEPROM_OPCODE_BITS);
816 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
817 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
818 break;
819
820 udelay(5);
821 ixgbe_standby_eeprom(hw);
822 };
823
824 /*
825 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
826 * devices (and only 0-5mSec on 5V devices)
827 */
828 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
829 hw_dbg(hw, "SPI EEPROM Status error\n");
830 status = IXGBE_ERR_EEPROM;
831 }
832
833 return status;
834}
835
836/**
837 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
838 * @hw: pointer to hardware structure
839 **/
840static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
841{
842 u32 eec;
843
844 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
845
846 /* Toggle CS to flush commands */
847 eec |= IXGBE_EEC_CS;
848 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
849 IXGBE_WRITE_FLUSH(hw);
850 udelay(1);
851 eec &= ~IXGBE_EEC_CS;
852 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
853 IXGBE_WRITE_FLUSH(hw);
854 udelay(1);
855}
856
857/**
858 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
859 * @hw: pointer to hardware structure
860 * @data: data to send to the EEPROM
861 * @count: number of bits to shift out
862 **/
863static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
864 u16 count)
865{
866 u32 eec;
867 u32 mask;
868 u32 i;
869
870 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
871
872 /*
873 * Mask is used to shift "count" bits of "data" out to the EEPROM
874 * one bit at a time. Determine the starting bit based on count
875 */
876 mask = 0x01 << (count - 1);
877
878 for (i = 0; i < count; i++) {
879 /*
880 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
881 * "1", and then raising and then lowering the clock (the SK
882 * bit controls the clock input to the EEPROM). A "0" is
883 * shifted out to the EEPROM by setting "DI" to "0" and then
884 * raising and then lowering the clock.
885 */
886 if (data & mask)
887 eec |= IXGBE_EEC_DI;
888 else
889 eec &= ~IXGBE_EEC_DI;
890
891 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
892 IXGBE_WRITE_FLUSH(hw);
893
894 udelay(1);
895
896 ixgbe_raise_eeprom_clk(hw, &eec);
897 ixgbe_lower_eeprom_clk(hw, &eec);
898
899 /*
900 * Shift mask to signify next bit of data to shift in to the
901 * EEPROM
902 */
903 mask = mask >> 1;
904 };
905
906 /* We leave the "DI" bit set to "0" when we leave this routine. */
907 eec &= ~IXGBE_EEC_DI;
908 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
909 IXGBE_WRITE_FLUSH(hw);
910}
911
/**
 *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to hardware structure
 *  @count: number of bits to shift in
 *
 *  Clocks @count bits in from the DO line, most significant bit first,
 *  and returns them packed into a u16.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		/* Make room for the next incoming bit */
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Keep DI clear; sample DO for the bit value */
		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
948
949/**
950 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
951 * @hw: pointer to hardware structure
952 * @eec: EEC register's current value
953 **/
954static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
955{
956 /*
957 * Raise the clock input to the EEPROM
958 * (setting the SK bit), then delay
959 */
960 *eec = *eec | IXGBE_EEC_SK;
961 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
962 IXGBE_WRITE_FLUSH(hw);
963 udelay(1);
964}
965
/**
 *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 *  @hw: pointer to hardware structure
 *  @eec: EEC register's current value
 *
 *  (kernel-doc fixed: the parameter is @eec, not @eecd.)
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
982
983/**
984 * ixgbe_release_eeprom - Release EEPROM, release semaphores
985 * @hw: pointer to hardware structure
986 **/
987static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
988{
989 u32 eec;
990
991 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
992
993 eec |= IXGBE_EEC_CS; /* Pull CS high */
994 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
995
996 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
997 IXGBE_WRITE_FLUSH(hw);
998
999 udelay(1);
1000
1001 /* Stop requesting EEPROM access */
1002 eec &= ~IXGBE_EEC_REQ;
1003 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1004
1005 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1006}
1007
9a799d71
AK
/**
 *  ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
 *  @hw: pointer to hardware structure
 *
 *  Sums words 0x0-0x3F plus all words reachable through the section
 *  pointers (excluding the FW pointer), then returns
 *  IXGBE_EEPROM_SUM - sum so that the full EEPROM sums to
 *  IXGBE_EEPROM_SUM when the stored checksum word is included.
 **/
static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != 0) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		/*
		 * NOTE(review): these reads are unchecked; a failed read
		 * leaves "pointer"/"word" holding their previous values,
		 * which silently skews the checksum - confirm intended.
		 */
		hw->eeprom.ops.read(hw, i, &pointer);

		/* Make sure the pointer seems valid */
		if (pointer != 0xFFFF && pointer != 0) {
			hw->eeprom.ops.read(hw, pointer, &length);

			if (length != 0xFFFF && length != 0) {
				/* Sum the section body following the length word */
				for (j = pointer+1; j <= pointer+length; j++) {
					hw->eeprom.ops.read(hw, j, &word);
					checksum += word;
				}
			}
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}
1051
/**
 *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 *  @hw: pointer to hardware structure
 *  @checksum_val: calculated checksum
 *
 *  Performs checksum calculation and validates the EEPROM checksum.  If the
 *  caller does not need checksum_val, the value can be NULL.
 *
 *  Returns 0 on match, IXGBE_ERR_EEPROM_CHECKSUM on mismatch, or the error
 *  from the initial probe read.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
                                           u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == 0) {
		/* probe value above is discarded; 'checksum' is reused */
		checksum = ixgbe_calc_eeprom_checksum(hw);

		/* NOTE(review): return value unchecked - on a failed read,
		 * read_checksum keeps its zero init and the compare below
		 * reports a spurious mismatch - TODO confirm acceptable */
		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);

		/*
		 * Verify read checksum from EEPROM is the same as
		 * calculated checksum
		 */
		if (read_checksum != checksum)
			status = IXGBE_ERR_EEPROM_CHECKSUM;

		/* If the user cares, return the calculated checksum */
		if (checksum_val)
			*checksum_val = checksum;
	} else {
		hw_dbg(hw, "EEPROM read failed\n");
	}

	return status;
}
1095
c44ade9e
JB
1096/**
1097 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1098 * @hw: pointer to hardware structure
1099 **/
1100s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1101{
1102 s32 status;
1103 u16 checksum;
1104
1105 /*
1106 * Read the first word from the EEPROM. If this times out or fails, do
1107 * not continue or we could be in for a very long wait while every
1108 * EEPROM read fails
1109 */
1110 status = hw->eeprom.ops.read(hw, 0, &checksum);
1111
1112 if (status == 0) {
1113 checksum = ixgbe_calc_eeprom_checksum(hw);
1114 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1115 checksum);
1116 } else {
1117 hw_dbg(hw, "EEPROM read failed\n");
1118 }
1119
1120 return status;
1121}
1122
9a799d71
AK
1123/**
1124 * ixgbe_validate_mac_addr - Validate MAC address
1125 * @mac_addr: pointer to MAC address.
1126 *
1127 * Tests a MAC address to ensure it is a valid Individual Address
1128 **/
1129s32 ixgbe_validate_mac_addr(u8 *mac_addr)
1130{
1131 s32 status = 0;
1132
1133 /* Make sure it is not a multicast address */
1134 if (IXGBE_IS_MULTICAST(mac_addr))
1135 status = IXGBE_ERR_INVALID_MAC_ADDR;
1136 /* Not a broadcast address */
1137 else if (IXGBE_IS_BROADCAST(mac_addr))
1138 status = IXGBE_ERR_INVALID_MAC_ADDR;
1139 /* Reject the zero address */
1140 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
c44ade9e 1141 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
9a799d71
AK
1142 status = IXGBE_ERR_INVALID_MAC_ADDR;
1143
1144 return status;
1145}
1146
1147/**
c44ade9e 1148 * ixgbe_set_rar_generic - Set Rx address register
9a799d71 1149 * @hw: pointer to hardware structure
9a799d71 1150 * @index: Receive address register to write
c44ade9e
JB
1151 * @addr: Address to put into receive address register
1152 * @vmdq: VMDq "set" or "pool" index
9a799d71
AK
1153 * @enable_addr: set flag that address is active
1154 *
1155 * Puts an ethernet address into a receive address register.
1156 **/
c44ade9e
JB
1157s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1158 u32 enable_addr)
9a799d71
AK
1159{
1160 u32 rar_low, rar_high;
c44ade9e
JB
1161 u32 rar_entries = hw->mac.num_rar_entries;
1162
1163 /* setup VMDq pool selection before this RAR gets enabled */
1164 hw->mac.ops.set_vmdq(hw, index, vmdq);
9a799d71 1165
c44ade9e
JB
1166 /* Make sure we are using a valid rar index range */
1167 if (index < rar_entries) {
b4617240 1168 /*
c44ade9e
JB
1169 * HW expects these in little endian so we reverse the byte
1170 * order from network order (big endian) to little endian
b4617240
PW
1171 */
1172 rar_low = ((u32)addr[0] |
1173 ((u32)addr[1] << 8) |
1174 ((u32)addr[2] << 16) |
1175 ((u32)addr[3] << 24));
c44ade9e
JB
1176 /*
1177 * Some parts put the VMDq setting in the extra RAH bits,
1178 * so save everything except the lower 16 bits that hold part
1179 * of the address and the address valid bit.
1180 */
1181 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1182 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1183 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
9a799d71 1184
b4617240
PW
1185 if (enable_addr != 0)
1186 rar_high |= IXGBE_RAH_AV;
9a799d71 1187
b4617240
PW
1188 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1189 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
c44ade9e
JB
1190 } else {
1191 hw_dbg(hw, "RAR index %d is out of range.\n", index);
a1868dc2 1192 return IXGBE_ERR_RAR_INDEX;
c44ade9e
JB
1193 }
1194
1195 return 0;
1196}
1197
/**
 *  ixgbe_clear_rar_generic - Remove Rx address register
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *
 *  Clears an ethernet address from a receive address register.
 *
 *  Returns 0 on success or IXGBE_ERR_RAR_INDEX if @index is out of range.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index < rar_entries) {
		/*
		 * Some parts put the VMDq setting in the extra RAH bits,
		 * so save everything except the lower 16 bits that hold part
		 * of the address and the address valid bit.
		 */
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
		rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

		/* zero the low address word, then the high/valid bits */
		IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
	} else {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_RAR_INDEX;
	}

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return 0;
}
1232
/**
 *  ixgbe_enable_rar - Enable Rx address register
 *  @hw: pointer to hardware structure
 *  @index: index into the RAR table
 *
 *  Enables the select receive address register by setting the Address
 *  Valid bit in RAH (read-modify-write, other RAH bits preserved).
 **/
static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high |= IXGBE_RAH_AV;
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
}
1248
/**
 *  ixgbe_disable_rar - Disable Rx address register
 *  @hw: pointer to hardware structure
 *  @index: index into the RAR table
 *
 *  Disables the select receive address register by clearing the Address
 *  Valid bit in RAH (read-modify-write, other RAH bits preserved).
 **/
static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= (~IXGBE_RAH_AV);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
}
1264
/**
 *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 *  @hw: pointer to hardware structure
 *
 *  Places the MAC address in receive address register 0 and clears the rest
 *  of the receive address registers. Clears the multicast table. Assumes
 *  the receiver is in reset when the routine is called.
 *
 *  Always returns 0.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
	} else {
		/* Setup the receive address. */
		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR0 now holds the MAC address; it is the only slot in use */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA and reset multicast accounting */
	hw->addr_ctrl.mc_addr_in_rar_count = 0;
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	hw_dbg(hw, " Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* not every MAC provides UTA tables; init only when the op exists */
	if (hw->mac.ops.init_uta_tables)
		hw->mac.ops.init_uta_tables(hw);

	return 0;
}
1321
2c5645cf
CL
1322/**
1323 * ixgbe_add_uc_addr - Adds a secondary unicast address.
1324 * @hw: pointer to hardware structure
1325 * @addr: new address
1326 *
1327 * Adds it to unused receive address register or goes into promiscuous mode.
1328 **/
c44ade9e 1329static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2c5645cf
CL
1330{
1331 u32 rar_entries = hw->mac.num_rar_entries;
1332 u32 rar;
1333
1334 hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1335 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1336
1337 /*
1338 * Place this address in the RAR if there is room,
1339 * else put the controller into promiscuous mode
1340 */
1341 if (hw->addr_ctrl.rar_used_count < rar_entries) {
1342 rar = hw->addr_ctrl.rar_used_count -
1343 hw->addr_ctrl.mc_addr_in_rar_count;
c44ade9e 1344 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2c5645cf
CL
1345 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
1346 hw->addr_ctrl.rar_used_count++;
1347 } else {
1348 hw->addr_ctrl.overflow_promisc++;
1349 }
1350
1351 hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
1352}
1353
/**
 *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
 *  @hw: pointer to hardware structure
 *  @netdev: pointer to net device structure
 *
 *  The given list replaces any existing list. Clears the secondary addrs from
 *  receive address registers. Uses unused receive address registers for the
 *  first secondary addresses, and falls back to promiscuous mode as needed.
 *
 *  Drivers using secondary unicast addresses must set user_set_promisc when
 *  manually putting the device into promiscuous mode.
 *
 *  Always returns 0.
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
                                      struct net_device *netdev)
{
	u32 i;
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 fctrl;
	struct netdev_hw_addr *ha;

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	/* NOTE(review): message prints uc_addr_in_use + 1 but the loop's last
	 * cleared index is uc_addr_in_use - debug text looks off by one */
	hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses; overflow_promisc is bumped on RAR overflow */
	netdev_for_each_uc_addr(ha, netdev) {
		hw_dbg(hw, " Adding the secondary addresses:\n");
		ixgbe_add_uc_addr(hw, ha->addr, 0);
	}

	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			hw_dbg(hw, " Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
			/* remember WE set promisc, so we can undo it later */
			hw->addr_ctrl.uc_set_promisc = true;
		}
	} else {
		/* only disable if set by overflow, not by user */
		if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
		    !(hw->addr_ctrl.user_set_promisc)) {
			hw_dbg(hw, " Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
			hw->addr_ctrl.uc_set_promisc = false;
		}
	}

	hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
	return 0;
}
1420
9a799d71
AK
1421/**
1422 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
1423 * @hw: pointer to hardware structure
1424 * @mc_addr: the multicast address
1425 *
1426 * Extracts the 12 bits, from a multicast address, to determine which
1427 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
1428 * incoming rx multicast addresses, to determine the bit-vector to check in
1429 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
c44ade9e 1430 * by the MO field of the MCSTCTRL. The MO field is set during initialization
9a799d71
AK
1431 * to mc_filter_type.
1432 **/
1433static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
1434{
1435 u32 vector = 0;
1436
1437 switch (hw->mac.mc_filter_type) {
b4617240 1438 case 0: /* use bits [47:36] of the address */
9a799d71
AK
1439 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
1440 break;
b4617240 1441 case 1: /* use bits [46:35] of the address */
9a799d71
AK
1442 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
1443 break;
b4617240 1444 case 2: /* use bits [45:34] of the address */
9a799d71
AK
1445 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
1446 break;
b4617240 1447 case 3: /* use bits [43:32] of the address */
9a799d71
AK
1448 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
1449 break;
b4617240 1450 default: /* Invalid mc_filter_type */
9a799d71
AK
1451 hw_dbg(hw, "MC filter type param set incorrectly\n");
1452 break;
1453 }
1454
1455 /* vector can only be 12-bits or boundary will be exceeded */
1456 vector &= 0xFFF;
1457 return vector;
1458}
1459
/**
 *  ixgbe_set_mta - Set bit-vector in multicast table
 *  @hw: pointer to hardware structure
 *  @mc_addr: multicast address whose hash bit should be set
 *
 *  Sets the bit-vector in the multicast table and bumps the in-use count.
 **/
static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;
	u32 mta_reg;

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	hw_dbg(hw, " bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
	mta_reg |= (1 << vector_bit);
	IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
1494
/**
 *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 *  @hw: pointer to hardware structure
 *  @netdev: pointer to net device structure
 *
 *  The given list replaces any existing list. Clears the MC addrs from receive
 *  address registers and the multicast table. Uses unused receive address
 *  registers for the first multicast addresses, and hashes the rest into the
 *  multicast table.
 *
 *  Always returns 0.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
                                      struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i;

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear the MTA */
	hw_dbg(hw, " Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* Add the new addresses (each set_mta bumps mta_in_use) */
	netdev_for_each_mc_addr(ha, netdev) {
		hw_dbg(hw, " Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, ha->addr);
	}

	/* Enable mta only when at least one hash entry is in use */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
		                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
	return 0;
}
1537
/**
 *  ixgbe_enable_mc_generic - Enable multicast address in RAR
 *  @hw: pointer to hardware structure
 *
 *  Enables multicast address in RAR and the use of the multicast hash table.
 *  Multicast RAR entries occupy the tail of the RAR table.
 *
 *  Always returns 0.
 **/
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	/* re-enable the valid bit on each multicast RAR entry */
	if (a->mc_addr_in_rar_count > 0)
		for (i = (rar_entries - a->mc_addr_in_rar_count);
		     i < rar_entries; i++)
			ixgbe_enable_rar(hw, i);

	/* re-enable the multicast filter if the hash table is populated */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
		                hw->mac.mc_filter_type);

	return 0;
}
1561
/**
 *  ixgbe_disable_mc_generic - Disable multicast address in RAR
 *  @hw: pointer to hardware structure
 *
 *  Disables multicast address in RAR and the use of the multicast hash table.
 *  Mirror image of ixgbe_enable_mc_generic.
 *
 *  Always returns 0.
 **/
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	/* clear the valid bit on each multicast RAR entry (table tail) */
	if (a->mc_addr_in_rar_count > 0)
		for (i = (rar_entries - a->mc_addr_in_rar_count);
		     i < rar_entries; i++)
			ixgbe_disable_rar(hw, i);

	/* drop the MFE bit, keeping the configured filter type */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	return 0;
}
1584
/**
 *  ixgbe_fc_enable_generic - Enable flow control
 *  @hw: pointer to hardware structure
 *  @packetbuf_num: packet buffer number (0-7)
 *
 *  Enable flow control according to the current settings.  Negotiates the
 *  mode with the link partner first, then programs MFLCN/FCCFG and the
 *  per-packet-buffer watermarks and pause timers.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
{
	s32 ret_val = 0;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 rx_pba_size;
	u32 fcrtl, fcrth;

#ifdef CONFIG_DCB
	if (hw->fc.requested_mode == ixgbe_fc_pfc)
		goto out;

#endif /* CONFIG_DCB */
	/* Negotiate the fc mode to use */
	ret_val = ixgbe_fc_autoneg(hw);
	if (ret_val)
		goto out;

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * 4: Priority Flow Control is enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
#ifdef CONFIG_DCB
	case ixgbe_fc_pfc:
		goto out;
		break;
#endif /* CONFIG_DCB */
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	/* packet buffer size in KB units */
	rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
	rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;

	/* watermarks are offsets (in KB) down from the top of the buffer;
	 * the << 10 converts KB to bytes for the FCRTH/FCRTL registers */
	fcrth = (rx_pba_size - hw->fc.high_water) << 10;
	fcrtl = (rx_pba_size - hw->fc.low_water) << 10;

	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
		fcrth |= IXGBE_FCRTH_FCEN;
		if (hw->fc.send_xon)
			fcrtl |= IXGBE_FCRTL_XONE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
	IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);

	/* Configure pause time (2 TCs per register) */
	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
	if ((packetbuf_num & 1) == 0)
		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
	else
		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);

	/* refresh threshold: half the pause time */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));

out:
	return ret_val;
}
1703
/**
 *  ixgbe_fc_autoneg - Configure flow control
 *  @hw: pointer to hardware structure
 *
 *  Compares our advertised flow control capabilities to those advertised by
 *  our link partner, and determines the proper flow control mode to use.
 *
 *  On any bail-out path hw->fc.current_mode is forced to requested_mode and
 *  fc_was_autonegged is cleared; on success the negotiated mode is stored
 *  and fc_was_autonegged set.  Always returns 0 in this version.
 **/
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	s32 ret_val = 0;
	ixgbe_link_speed speed;
	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
	u32 links2, anlp1_reg, autoc_reg, links;
	bool link_up;

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 *
	 * Since we're being called from an LSC, link is already known to be up.
	 * So use link_up_wait_to_complete=false.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);

	if (hw->fc.disable_fc_autoneg || (!link_up)) {
		hw->fc.fc_was_autonegged = false;
		hw->fc.current_mode = hw->fc.requested_mode;
		goto out;
	}

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		links = IXGBE_READ_REG(hw, IXGBE_LINKS);
		if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
			hw->fc.fc_was_autonegged = false;
			hw->fc.current_mode = hw->fc.requested_mode;
			goto out;
		}

		if (hw->mac.type == ixgbe_mac_82599EB) {
			links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
			if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
				hw->fc.fc_was_autonegged = false;
				hw->fc.current_mode = hw->fc.requested_mode;
				goto out;
			}
		}
	}

	/*
	 * On multispeed fiber at 1g, bail out if
	 * - link is up but AN did not complete, or if
	 * - link is up and AN completed but timed out
	 */
	if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
		linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
		/* NOTE(review): '(x & MASK) == 1' only holds if MASK is bit 0;
		 * if AN_TIMED_OUT is a higher bit this sub-test can never
		 * fire - verify against the register definition */
		if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
		    ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
			hw->fc.fc_was_autonegged = false;
			hw->fc.current_mode = hw->fc.requested_mode;
			goto out;
		}
	}

	/*
	 * Bail out on
	 * - copper or CX4 adapters
	 * - fiber adapters running at 10gig
	 */
	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.media_type == ixgbe_media_type_cx4) ||
	    ((hw->phy.media_type == ixgbe_media_type_fiber) &&
	     (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
		hw->fc.fc_was_autonegged = false;
		hw->fc.current_mode = hw->fc.requested_mode;
		goto out;
	}

	/*
	 * Read the AN advertisement and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
	    (hw->phy.media_type != ixgbe_media_type_backplane)) {
		pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
		pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
		if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
		    (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
			/*
			 * Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == ixgbe_fc_full) {
				hw->fc.current_mode = ixgbe_fc_full;
				hw_dbg(hw, "Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = ixgbe_fc_rx_pause;
				hw_dbg(hw, "Flow Control=RX PAUSE only\n");
			}
		} else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
			   (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
			   (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
			   (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
			hw->fc.current_mode = ixgbe_fc_tx_pause;
			hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
		} else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
			   (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
			   !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
			   (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_none;
			hw_dbg(hw, "Flow Control = NONE.\n");
		}
	}

	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		/*
		 * Read the 10g AN autoc and LP ability registers and resolve
		 * local flow control settings accordingly
		 */
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

		if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
		    (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
			/*
			 * Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == ixgbe_fc_full) {
				hw->fc.current_mode = ixgbe_fc_full;
				hw_dbg(hw, "Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = ixgbe_fc_rx_pause;
				hw_dbg(hw, "Flow Control=RX PAUSE only\n");
			}
		} else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
			   (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
			   (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
			   (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
			hw->fc.current_mode = ixgbe_fc_tx_pause;
			hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
		} else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
			   (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
			   !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
			   (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_none;
			hw_dbg(hw, "Flow Control = NONE.\n");
		}
	}
	/* Record that current_mode is the result of a successful autoneg */
	hw->fc.fc_was_autonegged = true;

out:
	return ret_val;
}
1877
11afc1b1 1878/**
620fa036 1879 * ixgbe_setup_fc - Set up flow control
11afc1b1
PW
1880 * @hw: pointer to hardware structure
1881 *
620fa036 1882 * Called at init time to set up flow control.
11afc1b1 1883 **/
7b25cdba 1884static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
11afc1b1
PW
1885{
1886 s32 ret_val = 0;
620fa036 1887 u32 reg;
11afc1b1 1888
bb3daa4a
PW
1889#ifdef CONFIG_DCB
1890 if (hw->fc.requested_mode == ixgbe_fc_pfc) {
1891 hw->fc.current_mode = hw->fc.requested_mode;
1892 goto out;
1893 }
1894
1895#endif
11afc1b1
PW
1896 /* Validate the packetbuf configuration */
1897 if (packetbuf_num < 0 || packetbuf_num > 7) {
1898 hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
1899 "is 0-7\n", packetbuf_num);
1900 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
1901 goto out;
1902 }
1903
1904 /*
1905 * Validate the water mark configuration. Zero water marks are invalid
1906 * because it causes the controller to just blast out fc packets.
1907 */
1908 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
620fa036
MC
1909 hw_dbg(hw, "Invalid water mark configuration\n");
1910 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
1911 goto out;
11afc1b1
PW
1912 }
1913
1914 /*
1915 * Validate the requested mode. Strict IEEE mode does not allow
620fa036 1916 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
11afc1b1
PW
1917 */
1918 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
1919 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
1920 "IEEE mode\n");
1921 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
1922 goto out;
1923 }
1924
1925 /*
1926 * 10gig parts do not have a word in the EEPROM to determine the
1927 * default flow control setting, so we explicitly set it to full.
1928 */
1929 if (hw->fc.requested_mode == ixgbe_fc_default)
1930 hw->fc.requested_mode = ixgbe_fc_full;
1931
1932 /*
620fa036
MC
1933 * Set up the 1G flow control advertisement registers so the HW will be
1934 * able to do fc autoneg once the cable is plugged in. If we end up
1935 * using 10g instead, this is harmless.
11afc1b1 1936 */
620fa036 1937 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
11afc1b1 1938
620fa036
MC
1939 /*
1940 * The possible values of fc.requested_mode are:
1941 * 0: Flow control is completely disabled
1942 * 1: Rx flow control is enabled (we can receive pause frames,
1943 * but not send pause frames).
1944 * 2: Tx flow control is enabled (we can send pause frames but
1945 * we do not support receiving pause frames).
1946 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1947#ifdef CONFIG_DCB
1948 * 4: Priority Flow Control is enabled.
1949#endif
1950 * other: Invalid.
1951 */
1952 switch (hw->fc.requested_mode) {
1953 case ixgbe_fc_none:
1954 /* Flow control completely disabled by software override. */
1955 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1956 break;
1957 case ixgbe_fc_rx_pause:
1958 /*
1959 * Rx Flow control is enabled and Tx Flow control is
1960 * disabled by software override. Since there really
1961 * isn't a way to advertise that we are capable of RX
1962 * Pause ONLY, we will advertise that we support both
1963 * symmetric and asymmetric Rx PAUSE. Later, we will
1964 * disable the adapter's ability to send PAUSE frames.
1965 */
1966 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1967 break;
1968 case ixgbe_fc_tx_pause:
1969 /*
1970 * Tx Flow control is enabled, and Rx Flow control is
1971 * disabled by software override.
1972 */
1973 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
1974 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
1975 break;
1976 case ixgbe_fc_full:
1977 /* Flow control (both Rx and Tx) is enabled by SW override. */
1978 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1979 break;
1980#ifdef CONFIG_DCB
1981 case ixgbe_fc_pfc:
11afc1b1 1982 goto out;
620fa036
MC
1983 break;
1984#endif /* CONFIG_DCB */
1985 default:
1986 hw_dbg(hw, "Flow control param set incorrectly\n");
539e5f02 1987 ret_val = IXGBE_ERR_CONFIG;
620fa036
MC
1988 goto out;
1989 break;
1990 }
1991
1992 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
1993 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
11afc1b1 1994
620fa036
MC
1995 /* Disable AN timeout */
1996 if (hw->fc.strict_ieee)
1997 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
1998
1999 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2000 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
11afc1b1 2001
539e5f02
PWJ
2002 /*
2003 * Set up the 10G flow control advertisement registers so the HW
2004 * can do fc autoneg once the cable is plugged in. If we end up
2005 * using 1g instead, this is harmless.
2006 */
2007 reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2008
2009 /*
2010 * The possible values of fc.requested_mode are:
2011 * 0: Flow control is completely disabled
2012 * 1: Rx flow control is enabled (we can receive pause frames,
2013 * but not send pause frames).
2014 * 2: Tx flow control is enabled (we can send pause frames but
2015 * we do not support receiving pause frames).
2016 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2017 * other: Invalid.
2018 */
2019 switch (hw->fc.requested_mode) {
2020 case ixgbe_fc_none:
2021 /* Flow control completely disabled by software override. */
2022 reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2023 break;
2024 case ixgbe_fc_rx_pause:
2025 /*
2026 * Rx Flow control is enabled and Tx Flow control is
2027 * disabled by software override. Since there really
2028 * isn't a way to advertise that we are capable of RX
2029 * Pause ONLY, we will advertise that we support both
2030 * symmetric and asymmetric Rx PAUSE. Later, we will
2031 * disable the adapter's ability to send PAUSE frames.
2032 */
2033 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2034 break;
2035 case ixgbe_fc_tx_pause:
2036 /*
2037 * Tx Flow control is enabled, and Rx Flow control is
2038 * disabled by software override.
2039 */
2040 reg |= (IXGBE_AUTOC_ASM_PAUSE);
2041 reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
2042 break;
2043 case ixgbe_fc_full:
2044 /* Flow control (both Rx and Tx) is enabled by SW override. */
2045 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2046 break;
2047#ifdef CONFIG_DCB
2048 case ixgbe_fc_pfc:
2049 goto out;
2050 break;
2051#endif /* CONFIG_DCB */
2052 default:
2053 hw_dbg(hw, "Flow control param set incorrectly\n");
2054 ret_val = IXGBE_ERR_CONFIG;
2055 goto out;
2056 break;
2057 }
2058 /*
2059 * AUTOC restart handles negotiation of 1G and 10G. There is
2060 * no need to set the PCS1GCTL register.
2061 */
2062 reg |= IXGBE_AUTOC_AN_RESTART;
2063 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
2064 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2065
11afc1b1
PW
2066out:
2067 return ret_val;
2068}
2069
9a799d71
AK
2070/**
2071 * ixgbe_disable_pcie_master - Disable PCI-express master access
2072 * @hw: pointer to hardware structure
2073 *
2074 * Disables PCI-Express master access and verifies there are no pending
2075 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2076 * bit hasn't caused the master requests to be disabled, else 0
2077 * is returned signifying master requests disabled.
2078 **/
2079s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2080{
c44ade9e
JB
2081 u32 i;
2082 u32 reg_val;
2083 u32 number_of_queues;
9a799d71
AK
2084 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2085
c44ade9e
JB
2086 /* Disable the receive unit by stopping each queue */
2087 number_of_queues = hw->mac.max_rx_queues;
2088 for (i = 0; i < number_of_queues; i++) {
2089 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
2090 if (reg_val & IXGBE_RXDCTL_ENABLE) {
2091 reg_val &= ~IXGBE_RXDCTL_ENABLE;
2092 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
2093 }
2094 }
2095
2096 reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
2097 reg_val |= IXGBE_CTRL_GIO_DIS;
2098 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
9a799d71
AK
2099
2100 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2101 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
2102 status = 0;
2103 break;
2104 }
2105 udelay(100);
2106 }
2107
2108 return status;
2109}
2110
2111
2112/**
c44ade9e 2113 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
9a799d71 2114 * @hw: pointer to hardware structure
c44ade9e 2115 * @mask: Mask to specify which semaphore to acquire
9a799d71 2116 *
c44ade9e 2117 * Acquires the SWFW semaphore thought the GSSR register for the specified
9a799d71
AK
2118 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2119 **/
2120s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2121{
2122 u32 gssr;
2123 u32 swmask = mask;
2124 u32 fwmask = mask << 5;
2125 s32 timeout = 200;
2126
2127 while (timeout) {
2128 if (ixgbe_get_eeprom_semaphore(hw))
539e5f02 2129 return IXGBE_ERR_SWFW_SYNC;
9a799d71
AK
2130
2131 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2132 if (!(gssr & (fwmask | swmask)))
2133 break;
2134
2135 /*
2136 * Firmware currently using resource (fwmask) or other software
2137 * thread currently using resource (swmask)
2138 */
2139 ixgbe_release_eeprom_semaphore(hw);
2140 msleep(5);
2141 timeout--;
2142 }
2143
2144 if (!timeout) {
2145 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
539e5f02 2146 return IXGBE_ERR_SWFW_SYNC;
9a799d71
AK
2147 }
2148
2149 gssr |= swmask;
2150 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2151
2152 ixgbe_release_eeprom_semaphore(hw);
2153 return 0;
2154}
2155
2156/**
2157 * ixgbe_release_swfw_sync - Release SWFW semaphore
2158 * @hw: pointer to hardware structure
c44ade9e 2159 * @mask: Mask to specify which semaphore to release
9a799d71 2160 *
c44ade9e 2161 * Releases the SWFW semaphore thought the GSSR register for the specified
9a799d71
AK
2162 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2163 **/
2164void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2165{
2166 u32 gssr;
2167 u32 swmask = mask;
2168
2169 ixgbe_get_eeprom_semaphore(hw);
2170
2171 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2172 gssr &= ~swmask;
2173 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2174
2175 ixgbe_release_eeprom_semaphore(hw);
2176}
2177
11afc1b1
PW
/**
 *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
 *  @hw: pointer to hardware structure
 *  @regval: register value to write to RXCTRL
 *
 *  Enables the Rx DMA unit by writing @regval directly to RXCTRL.
 *  The caller supplies the complete register value.  Always returns 0.
 **/
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

	return 0;
}
87c12017
PW
2191
2192/**
2193 * ixgbe_blink_led_start_generic - Blink LED based on index.
2194 * @hw: pointer to hardware structure
2195 * @index: led number to blink
2196 **/
2197s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2198{
2199 ixgbe_link_speed speed = 0;
2200 bool link_up = 0;
2201 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2202 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2203
2204 /*
2205 * Link must be up to auto-blink the LEDs;
2206 * Force it if link is down.
2207 */
2208 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2209
2210 if (!link_up) {
50ac58ba 2211 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
87c12017
PW
2212 autoc_reg |= IXGBE_AUTOC_FLU;
2213 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2214 msleep(10);
2215 }
2216
2217 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2218 led_reg |= IXGBE_LED_BLINK(index);
2219 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2220 IXGBE_WRITE_FLUSH(hw);
2221
2222 return 0;
2223}
2224
2225/**
2226 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2227 * @hw: pointer to hardware structure
2228 * @index: led number to stop blinking
2229 **/
2230s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2231{
2232 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2233 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2234
2235 autoc_reg &= ~IXGBE_AUTOC_FLU;
2236 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2237 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2238
2239 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2240 led_reg &= ~IXGBE_LED_BLINK(index);
2241 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2242 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2243 IXGBE_WRITE_FLUSH(hw);
2244
2245 return 0;
2246}
21ce849b
MC
2247
/**
 *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
 *  @hw: pointer to hardware structure
 *  @san_mac_offset: SAN MAC address offset
 *
 *  This function will read the EEPROM location for the SAN MAC address
 *  pointer, and returns the value at that location.  This is used in both
 *  get and set mac_addr routines.
 *
 *  Always returns 0.  The EEPROM read's status is not checked, so on a
 *  failed read *san_mac_offset may be left unmodified; callers treat
 *  0 / 0xFFFF as "no SAN address present".
 **/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
                                         u16 *san_mac_offset)
{
	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available.
	 */
	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);

	return 0;
}
2268
2269/**
2270 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
2271 * @hw: pointer to hardware structure
2272 * @san_mac_addr: SAN MAC address
2273 *
2274 * Reads the SAN MAC address from the EEPROM, if it's available. This is
2275 * per-port, so set_lan_id() must be called before reading the addresses.
2276 * set_lan_id() is called by identify_sfp(), but this cannot be relied
2277 * upon for non-SFP connections, so we must call it here.
2278 **/
2279s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2280{
2281 u16 san_mac_data, san_mac_offset;
2282 u8 i;
2283
2284 /*
2285 * First read the EEPROM pointer to see if the MAC addresses are
2286 * available. If they're not, no point in calling set_lan_id() here.
2287 */
2288 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2289
2290 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2291 /*
2292 * No addresses available in this EEPROM. It's not an
2293 * error though, so just wipe the local address and return.
2294 */
2295 for (i = 0; i < 6; i++)
2296 san_mac_addr[i] = 0xFF;
2297
2298 goto san_mac_addr_out;
2299 }
2300
2301 /* make sure we know which port we need to program */
2302 hw->mac.ops.set_lan_id(hw);
2303 /* apply the port offset to the address offset */
2304 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2305 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2306 for (i = 0; i < 3; i++) {
2307 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
2308 san_mac_addr[i * 2] = (u8)(san_mac_data);
2309 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2310 san_mac_offset++;
2311 }
2312
2313san_mac_addr_out:
2314 return 0;
2315}
2316
2317/**
2318 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2319 * @hw: pointer to hardware structure
2320 *
2321 * Read PCIe configuration space, and get the MSI-X vector count from
2322 * the capabilities table.
2323 **/
2324u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2325{
2326 struct ixgbe_adapter *adapter = hw->back;
2327 u16 msix_count;
2328 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
2329 &msix_count);
2330 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2331
2332 /* MSI-X count is zero-based in HW, so increment to give proper value */
2333 msix_count++;
2334
2335 return msix_count;
2336}
2337
2338/**
2339 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
2340 * @hw: pointer to hardware struct
2341 * @rar: receive address register index to disassociate
2342 * @vmdq: VMDq pool index to remove from the rar
2343 **/
2344s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2345{
2346 u32 mpsar_lo, mpsar_hi;
2347 u32 rar_entries = hw->mac.num_rar_entries;
2348
2349 if (rar < rar_entries) {
2350 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2351 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2352
2353 if (!mpsar_lo && !mpsar_hi)
2354 goto done;
2355
2356 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2357 if (mpsar_lo) {
2358 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2359 mpsar_lo = 0;
2360 }
2361 if (mpsar_hi) {
2362 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2363 mpsar_hi = 0;
2364 }
2365 } else if (vmdq < 32) {
2366 mpsar_lo &= ~(1 << vmdq);
2367 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2368 } else {
2369 mpsar_hi &= ~(1 << (vmdq - 32));
2370 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2371 }
2372
2373 /* was that the last pool using this rar? */
2374 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2375 hw->mac.ops.clear_rar(hw, rar);
2376 } else {
2377 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2378 }
2379
2380done:
2381 return 0;
2382}
2383
2384/**
2385 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
2386 * @hw: pointer to hardware struct
2387 * @rar: receive address register index to associate with a VMDq index
2388 * @vmdq: VMDq pool index
2389 **/
2390s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2391{
2392 u32 mpsar;
2393 u32 rar_entries = hw->mac.num_rar_entries;
2394
2395 if (rar < rar_entries) {
2396 if (vmdq < 32) {
2397 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2398 mpsar |= 1 << vmdq;
2399 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2400 } else {
2401 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2402 mpsar |= 1 << (vmdq - 32);
2403 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2404 }
2405 } else {
2406 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2407 }
2408 return 0;
2409}
2410
2411/**
2412 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
2413 * @hw: pointer to hardware structure
2414 **/
2415s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2416{
2417 int i;
2418
2419
2420 for (i = 0; i < 128; i++)
2421 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
2422
2423 return 0;
2424}
2425
/**
 *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *
 *  return the VLVF index where this VLAN id should be placed
 *  (or IXGBE_ERR_NO_SPACE if the table is full)
 *
 **/
static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
{
	u32 bits = 0;
	u32 first_empty_slot = 0;
	s32 regindex;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/*
	 * Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way
	 */
	/* The scan starts at 1 (slot 0 is the VLAN-0 shortcut above), so a
	 * first_empty_slot of 0 doubles as "no empty slot seen yet". */
	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (!bits && !(first_empty_slot))
			first_empty_slot = regindex;
		else if ((bits & 0x0FFF) == vlan)
			/* NOTE(review): only the low 12 VLAN-id bits are
			 * compared; the entry's valid bit is not checked --
			 * assumes stale entries are fully zeroed. Confirm. */
			break;
	}

	/*
	 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
	 * in the VLVF. Else use the first empty VLVF register for this
	 * vlan id.
	 */
	if (regindex >= IXGBE_VLVF_ENTRIES) {
		if (first_empty_slot)
			regindex = first_empty_slot;
		else {
			hw_dbg(hw, "No space in VLVF.\n");
			regindex = IXGBE_ERR_NO_SPACE;
		}
	}

	return regindex;
}
2472
/**
 *  ixgbe_set_vfta_generic - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
 *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
 *  Returns 0 on success, IXGBE_ERR_PARAM for an out-of-range VLAN id,
 *  or a negative VLVF slot-lookup error when VT mode is enabled and the
 *  table is full.
 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                           bool vlan_on)
{
	s32 regindex;
	u32 bitindex;
	u32 vfta;
	u32 bits;
	u32 vt;
	u32 targetbit;
	bool vfta_changed = false;

	/* Valid VLAN ids are 0-4095. */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));

	/* Only mark the VFTA dirty if the bit actually needs flipping. */
	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = true;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = true;
		}
	}

	/* Part 2
	 * If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		s32 vlvf_index;

		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		/* Each VLVF slot has a 64-bit pool bitmap split across two
		 * consecutive VLVFB registers: index vlvf_index*2 holds
		 * pools 0-31, vlvf_index*2+1 holds pools 32-63. */
		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index*2),
						bits);
			} else {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
				bits |= (1 << (vind-32));
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1),
						bits);
			}
		} else {
			/* clear the pool bit */
			/* After clearing, "bits" is OR'd with the other half
			 * of the bitmap so it reflects whether ANY pool still
			 * uses this VLAN id. */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index*2),
						bits);
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
			} else {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
				bits &= ~(1 << (vind-32));
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1),
						bits);
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then ignore the request.  We're not worried
		 * about the case where we're turning the VFTA VLAN ID
		 * entry bit on, only when requested to turn it off as
		 * there may be multiple pools and/or VFs using the
		 * VLAN ID entry.  In that case we cannot clear the
		 * VFTA bit until all pools/VFs using that VLAN ID have also
		 * been cleared.  This will be indicated by "bits" being
		 * zero.
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
					(IXGBE_VLVF_VIEN | vlan));
			if (!vlan_on) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				vfta_changed = false;
			}
		}
		else
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
	}

	/* Commit the VFTA change only after the VLVF part succeeded. */
	if (vfta_changed)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);

	return 0;
}
2615
2616/**
2617 * ixgbe_clear_vfta_generic - Clear VLAN filter table
2618 * @hw: pointer to hardware structure
2619 *
2620 * Clears the VLAN filer table, and the VMDq index associated with the filter
2621 **/
2622s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
2623{
2624 u32 offset;
2625
2626 for (offset = 0; offset < hw->mac.vft_size; offset++)
2627 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
2628
2629 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
2630 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
2631 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
2632 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
2633 }
2634
2635 return 0;
2636}
2637
2638/**
2639 * ixgbe_check_mac_link_generic - Determine link and speed status
2640 * @hw: pointer to hardware structure
2641 * @speed: pointer to link speed
2642 * @link_up: true when link is up
2643 * @link_up_wait_to_complete: bool used to wait for link up or not
2644 *
2645 * Reads the links register to determine if link is up and the current speed
2646 **/
2647s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2648 bool *link_up, bool link_up_wait_to_complete)
2649{
2650 u32 links_reg;
2651 u32 i;
2652
2653 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
2654 if (link_up_wait_to_complete) {
2655 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
2656 if (links_reg & IXGBE_LINKS_UP) {
2657 *link_up = true;
2658 break;
2659 } else {
2660 *link_up = false;
2661 }
2662 msleep(100);
2663 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
2664 }
2665 } else {
2666 if (links_reg & IXGBE_LINKS_UP)
2667 *link_up = true;
2668 else
2669 *link_up = false;
2670 }
2671
2672 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2673 IXGBE_LINKS_SPEED_10G_82599)
2674 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2675 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2676 IXGBE_LINKS_SPEED_1G_82599)
2677 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2678 else
2679 *speed = IXGBE_LINK_SPEED_100_FULL;
2680
2681 /* if link is down, zero out the current_mode */
2682 if (*link_up == false) {
2683 hw->fc.current_mode = ixgbe_fc_none;
2684 hw->fc.fc_was_autonegged = false;
2685 }
2686
2687 return 0;
2688}