/*
 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
 * for 64bit hardware platforms.
 *
 * TODO
 *	rx_copybreak/alignment
 *	More testing
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list not VIA.
 *
 * Original code:
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This software may be redistributed and/or modified under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * Date: Jan 24, 2003
 *
 * MODULE_LICENSE("GPL");
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"


static int velocity_nics;
static int msglevel = MSG_LEVEL_INFO;

/**
 * mac_get_cam_mask - Read a CAM mask
 * @regs: register block for this velocity
 * @mask: buffer to store mask
 *
 * Fetch the mask bits of the selected CAM and store them into the
 * provided mask buffer.
 */
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;

        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(0, &regs->CAMADDR);

        /* read mask */
        for (i = 0; i < 8; i++)
                *mask++ = readb(&(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 * mac_set_cam_mask - Set a CAM mask
 * @regs: register block for this velocity
 * @mask: CAM mask to load
 *
 * Store a new mask into a CAM
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
        int i;
        /* Select CAM mask */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

        for (i = 0; i < 8; i++)
                writeb(*mask++, &(regs->MARCAM[i]));

        /* disable CAMEN */
        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 * mac_set_cam - set CAM data
 * @regs: register block of this velocity
 * @idx: Cam index
 * @addr: 2 or 6 bytes of CAM data
 *
 * Load an address or vlan tag into a CAM
 */
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
        int i;

        /* Select CAM data */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

        for (i = 0; i < 6; i++)
                writeb(*addr++, &(regs->MARCAM[i]));

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
                             const u8 *addr)
{

        /* Select CAM data */
        BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

        idx &= (64 - 1);

        writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
        writew(*((u16 *) addr), &regs->MARCAM[0]);

        BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

        udelay(10);

        writeb(0, &regs->CAMADDR);

        /* Select mar */
        BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 * mac_wol_reset - reset WOL after exiting low power
 * @regs: register block of this velocity
 *
 * Called after we drop out of wake on lan mode in order to
 * reset the Wake on lan features. This function doesn't restore
 * the rest of the logic from the result of sleep/wakeup
 */
static void mac_wol_reset(struct mac_regs __iomem *regs)
{

        /* Turn off SWPTAG right after leaving power mode */
        BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
        /* clear sticky bits */
        BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

        BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
        BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
        /* disable force PME-enable */
        writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
        /* disable power-event config bit */
        writew(0xFFFF, &regs->WOLCRClr);
        /* clear power status */
        writew(0xFFFF, &regs->WOLSRClr);
}

static const struct ethtool_ops velocity_ethtool_ops;

/*
 *      Define module options
 */

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N, D) \
        static int N[MAX_UNITS] = OPTION_DEFAULT;\
        module_param_array(N, int, NULL, 0); \
        MODULE_PARM_DESC(N, D);
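
/*
 * For illustration (not part of the original source): VELOCITY_PARAM expands
 * to a per-unit module parameter array, so for example
 * VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors") becomes:
 *
 *      static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *      module_param_array(RxDescriptors, int, NULL, 0);
 *      MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 *
 * Each option below can therefore be given once per adapter on the module
 * command line.
 */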

#define RX_DESC_MIN 64
#define RX_DESC_MAX 255
#define RX_DESC_DEF 64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN 16
#define TX_DESC_MAX 256
#define TX_DESC_DEF 64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN 0
#define RX_THRESH_MAX 3
#define RX_THRESH_DEF 0
/* rx_thresh[] controls the receive fifo threshold.
   0: the rx fifo threshold is 128 bytes.
   1: the rx fifo threshold is 512 bytes.
   2: the rx fifo threshold is 1024 bytes.
   3: store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN 0
#define DMA_LENGTH_MAX 7
#define DMA_LENGTH_DEF 6

/* DMA_length[] controls the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (flush till empty)
   7: SF (flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF 0
/* IP_byte_align[] controls whether the IP header is DWORD byte aligned.
   0: the IP header will not be DWORD byte aligned. (Default)
   1: the IP header will be DWORD byte aligned.
   In some environments the IP header must be DWORD byte aligned,
   or the packet will be dropped when we receive it. (eg: IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define FLOW_CNTL_DEF 1
#define FLOW_CNTL_MIN 1
#define FLOW_CNTL_MAX 5

/* flow_control[] sets the flow control ability of the NIC.
   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 4
/* speed_duplex[] sets the speed and duplex mode of the NIC.
   0: autonegotiation for both speed and duplex mode
   1: 100Mbps half duplex mode
   2: 100Mbps full duplex mode
   3: 10Mbps half duplex mode
   4: 10Mbps full duplex mode

   Note:
   if the EEPROM has been set to a forced mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define VAL_PKT_LEN_DEF 0
/* ValPktLen[] controls handling of frames with an invalid layer 2 length.
   0: Receive frames with an invalid layer 2 length (Default)
   1: Drop frames with an invalid layer 2 length
*/
VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");

#define WOL_OPT_DEF 0
#define WOL_OPT_MIN 0
#define WOL_OPT_MAX 7
/* wol_opts[] controls wake on lan behavior.
   0: Wake up if a magic packet is received. (Default)
   1: Wake up if link status changes on/off.
   2: Wake up if an arp packet is received.
   4: Wake up if any unicast packet is received.
   These values can be summed to enable more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");

static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
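
/*
 * Illustrative usage (not from the original source; the module name is
 * assumed from the source file name): the per-unit options above are
 * comma-separated arrays, one entry per adapter, e.g.
 *
 *      modprobe via-velocity RxDescriptors=128,64 speed_duplex=0,2 wol_opts=1
 *
 * Values outside the documented ranges are reported and replaced by the
 * defaults (see velocity_set_int_opt() below).
 */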

/*
 *      Internal board variants. At the moment we have only one
 */
static struct velocity_info_tbl chip_info_table[] = {
        {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
        { }
};

/*
 *      Describe the PCI device identifiers that we support in this
 *      device driver. Used for hotplug autoloading.
 */
static const struct pci_device_id velocity_id_table[] __devinitdata = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
        { }
};

MODULE_DEVICE_TABLE(pci, velocity_id_table);

/**
 * get_chip_name - identifier to name
 * @chip_id: chip identifier
 *
 * Given a chip identifier return a suitable description. Returns
 * a pointer to a static string valid while the driver is loaded.
 */
static const char __devinit *get_chip_name(enum chip_type chip_id)
{
        int i;
        for (i = 0; chip_info_table[i].name != NULL; i++)
                if (chip_info_table[i].chip_id == chip_id)
                        break;
        return chip_info_table[i].name;
}

/**
 * velocity_remove1 - device unplug
 * @pdev: PCI device being removed
 *
 * Device unload callback. Called on an unplug or on module
 * unload for each active device that is present. Disconnects
 * the device from the network layer and frees all the resources
 */
static void __devexit velocity_remove1(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct velocity_info *vptr = netdev_priv(dev);

        unregister_netdev(dev);
        iounmap(vptr->mac_regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);

        velocity_nics--;
}

/**
 * velocity_set_int_opt - parser for integer options
 * @opt: pointer to option value
 * @val: value the user requested (or -1 for default)
 * @min: lowest value allowed
 * @max: highest value allowed
 * @def: default value
 * @name: property name
 * @devname: device name
 *
 * Set an integer property in the module options. This function does
 * all the verification and checking as well as reporting so that
 * we don't duplicate code for each option.
 */
static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
{
        if (val == -1)
                *opt = def;
        else if (val < min || val > max) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
                             devname, name, min, max);
                *opt = def;
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
                             devname, name, val);
                *opt = val;
        }
}

/**
 * velocity_set_bool_opt - parser for boolean options
 * @opt: pointer to option value
 * @val: value the user requested (or -1 for default)
 * @def: default value (yes/no)
 * @flag: numeric value to set for true.
 * @name: property name
 * @devname: device name
 *
 * Set a boolean property in the module options. This function does
 * all the verification and checking as well as reporting so that
 * we don't duplicate code for each option.
 */
static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
{
        (*opt) &= (~flag);
        if (val == -1)
                *opt |= (def ? flag : 0);
        else if (val < 0 || val > 1) {
                printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
                       devname, name);
                *opt |= (def ? flag : 0);
        } else {
                printk(KERN_INFO "%s: set parameter %s to %s\n",
                       devname, name, val ? "TRUE" : "FALSE");
                *opt |= (val ? flag : 0);
        }
}

/**
 * velocity_get_options - set options on device
 * @opts: option structure for the device
 * @index: index of option to use in module options array
 * @devname: device name
 *
 * Turn the module and command options into a single structure
 * for the current device
 */
static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
{

        velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
        velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
        velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
        velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);

        velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
        velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
        velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
        velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
        velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
        opts->numrx = (opts->numrx & ~3);
}

/**
 * velocity_init_cam_filter - initialise CAM
 * @vptr: velocity to program
 *
 * Initialize the content addressable memory used for filters. Load
 * appropriately according to the presence of VLAN
 */
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;

        /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
        WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
        WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

        /* Disable all CAMs */
        memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
        memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        mac_set_cam_mask(regs, vptr->mCAMmask);

        /* Enable VCAMs */
        if (vptr->vlgrp) {
                unsigned int vid, i = 0;

                if (!vlan_group_get_device(vptr->vlgrp, 0))
                        WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);

                for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
                        if (vlan_group_get_device(vptr->vlgrp, vid)) {
                                mac_set_vlan_cam(regs, i, (u8 *) &vid);
                                vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
                                if (++i >= VCAM_SIZE)
                                        break;
                        }
                }
                mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
        }
}

static void velocity_vlan_rx_register(struct net_device *dev,
                                      struct vlan_group *grp)
{
        struct velocity_info *vptr = netdev_priv(dev);

        vptr->vlgrp = grp;
}

static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
}

static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct velocity_info *vptr = netdev_priv(dev);

        spin_lock_irq(&vptr->lock);
        vlan_group_set_device(vptr->vlgrp, vid, NULL);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
}

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
{
        vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
}

/**
 * velocity_rx_reset - handle a receive reset
 * @vptr: velocity we are resetting
 *
 * Reset the ownership and status for the receive ring side.
 * Hand the whole receive queue back to the NIC.
 */
static void velocity_rx_reset(struct velocity_info *vptr)
{

        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i;

        velocity_init_rx_ring_indexes(vptr);

        /*
         * Init state, all RD entries belong to the NIC
         */
        for (i = 0; i < vptr->options.numrx; ++i)
                vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

        writew(vptr->options.numrx, &regs->RBRDU);
        writel(vptr->rx.pool_dma, &regs->RDBaseLo);
        writew(0, &regs->RDIdx);
        writew(vptr->options.numrx - 1, &regs->RDCSize);
}

/**
 * velocity_get_opt_media_mode - get media selection
 * @vptr: velocity adapter
 *
 * Get the media mode stored in EEPROM or module options and load
 * mii_status accordingly. The requested link state information
 * is also returned.
 */
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
        u32 status = 0;

        switch (vptr->options.spd_dpx) {
        case SPD_DPX_AUTO:
                status = VELOCITY_AUTONEG_ENABLE;
                break;
        case SPD_DPX_100_FULL:
                status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_10_FULL:
                status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
                break;
        case SPD_DPX_100_HALF:
                status = VELOCITY_SPEED_100;
                break;
        case SPD_DPX_10_HALF:
                status = VELOCITY_SPEED_10;
                break;
        }
        vptr->mii_status = status;
        return status;
}

/**
 * safe_disable_mii_autopoll - autopoll off
 * @regs: velocity registers
 *
 * Turn off the autopoll and wait for it to disable on the chip
 */
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
        u16 ww;

        /* turn off MAUTO */
        writeb(0, &regs->MIICR);
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }
}

/**
 * enable_mii_autopoll - turn on autopolling
 * @regs: velocity registers
 *
 * Enable the MII link status autopoll feature on the Velocity
 * hardware. Wait for it to enable.
 */
static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
        int ii;

        writeb(0, &(regs->MIICR));
        writeb(MIIADR_SWMPL, &regs->MIIADR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }

        writeb(MIICR_MAUTO, &regs->MIICR);

        for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
                udelay(1);
                if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
                        break;
        }

}

/**
 * velocity_mii_read - read MII data
 * @regs: velocity registers
 * @index: MII register index
 * @data: buffer for received data
 *
 * Perform a single read of an MII 16bit register. Returns zero
 * on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
        u16 ww;

        /*
         * Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        writeb(index, &regs->MIIADR);

        BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                if (!(readb(&regs->MIICR) & MIICR_RCMD))
                        break;
        }

        *data = readw(&regs->MIIDATA);

        enable_mii_autopoll(regs);
        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}


/**
 * mii_check_media_mode - check media state
 * @regs: velocity registers
 *
 * Check the current MII status and determine the link status
 * accordingly
 */
static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u16 ANAR;

        if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
                status |= VELOCITY_LINK_FAIL;

        if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
                status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
        else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
                status |= (VELOCITY_SPEED_1000);
        else {
                velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
                if (ANAR & ANAR_TXFD)
                        status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
                else if (ANAR & ANAR_TX)
                        status |= VELOCITY_SPEED_100;
                else if (ANAR & ANAR_10FD)
                        status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
                else
                        status |= (VELOCITY_SPEED_10);
        }

        if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
                velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
                if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
                    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
                        if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}

/**
 * velocity_mii_write - write MII data
 * @regs: velocity registers
 * @mii_addr: MII register index
 * @data: 16bit data for the MII register
 *
 * Perform a single write to an MII 16bit register. Returns zero
 * on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
        u16 ww;

        /*
         * Disable MIICR_MAUTO, so that mii addr can be set normally
         */
        safe_disable_mii_autopoll(regs);

        /* MII reg offset */
        writeb(mii_addr, &regs->MIIADR);
        /* set MII data */
        writew(data, &regs->MIIDATA);

        /* turn on MIICR_WCMD */
        BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

        /* W_MAX_TIMEOUT is the timeout period */
        for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
                udelay(5);
                if (!(readb(&regs->MIICR) & MIICR_WCMD))
                        break;
        }
        enable_mii_autopoll(regs);

        if (ww == W_MAX_TIMEOUT)
                return -ETIMEDOUT;
        return 0;
}

/**
 * set_mii_flow_control - flow control setup
 * @vptr: velocity interface
 *
 * Set up the flow control on this interface according to
 * the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
        /* Enable or Disable PAUSE in ANAR */
        switch (vptr->options.flow_cntl) {
        case FLOW_CNTL_TX:
                MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
                MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
                break;

        case FLOW_CNTL_RX:
                MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
                MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
                break;

        case FLOW_CNTL_TX_RX:
                MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
                MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
                break;

        case FLOW_CNTL_DISABLE:
                MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
                MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
                break;
        default:
                break;
        }
}

/**
 * mii_set_auto_on - autonegotiate on
 * @vptr: velocity
 *
 * Enable autonegotiation on this interface
 */
static void mii_set_auto_on(struct velocity_info *vptr)
{
        if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
                MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
        else
                MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
}

static u32 check_connection_type(struct mac_regs __iomem *regs)
{
        u32 status = 0;
        u8 PHYSR0;
        u16 ANAR;
        PHYSR0 = readb(&regs->PHYSR0);

        /*
        if (!(PHYSR0 & PHYSR0_LINKGD))
                status |= VELOCITY_LINK_FAIL;
        */

        if (PHYSR0 & PHYSR0_FDPX)
                status |= VELOCITY_DUPLEX_FULL;

        if (PHYSR0 & PHYSR0_SPDG)
                status |= VELOCITY_SPEED_1000;
        else if (PHYSR0 & PHYSR0_SPD10)
                status |= VELOCITY_SPEED_10;
        else
                status |= VELOCITY_SPEED_100;

        if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
                velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
                if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
                    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
                        if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }

        return status;
}


/**
 * velocity_set_media_mode - set media mode
 * @vptr: velocity adapter
 * @mii_status: old MII link state
 *
 * Check the media link state and configure the flow control
 * PHY and also velocity hardware setup accordingly. In particular
 * we need to set up CD polling and frame bursting.
 */
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
        u32 curr_status;
        struct mac_regs __iomem *regs = vptr->mac_regs;

        vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
        curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

        /* Set mii link status */
        set_mii_flow_control(vptr);

        /*
           Check if new status is consistent with current status
           if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
               (mii_status == curr_status)) {
                   vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
                   vptr->mii_status = check_connection_type(vptr->mac_regs);
                   VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
                   return 0;
           }
         */

        if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
                MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);

        /*
         * If connection type is AUTO
         */
        if (mii_status & VELOCITY_AUTONEG_ENABLE) {
                VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
                /* clear force MAC mode bit */
                BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
                /* set duplex mode of MAC according to duplex mode of MII */
                MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
                MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
                MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);

                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
        } else {
                u16 ANAR;
                u8 CHIPGCR;

                /*
                 * 1. if it's 3119, disable frame bursting in halfduplex mode
                 *    and enable it in fullduplex mode
                 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
                 * 3. only enable CD heart beat counter in 10HD mode
                 */

                /* set force MAC mode bit */
                BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

                CHIPGCR = readb(&regs->CHIPGCR);
                CHIPGCR &= ~CHIPGCR_FCGMII;

                if (mii_status & VELOCITY_DUPLEX_FULL) {
                        CHIPGCR |= CHIPGCR_FCFDX;
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
                } else {
                        CHIPGCR &= ~CHIPGCR_FCFDX;
                        VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
                        writeb(CHIPGCR, &regs->CHIPGCR);
                        if (vptr->rev_id < REV_ID_VT3216_A0)
                                BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
                }

                MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);

                if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
                        BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
                else
                        BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

                /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
                velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
                ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
                if (mii_status & VELOCITY_SPEED_100) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ANAR_TXFD;
                        else
                                ANAR |= ANAR_TX;
                } else {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ANAR_10FD;
                        else
                                ANAR |= ANAR_10;
                }
                velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
                /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
        }
        /* vptr->mii_status = mii_check_media_mode(vptr->mac_regs); */
        /* vptr->mii_status = check_connection_type(vptr->mac_regs); */
        return VELOCITY_LINK_CHANGE;
}

/**
 * velocity_print_link_status - link status reporting
 * @vptr: velocity to report on
 *
 * Turn the link status of the velocity card into a kernel log
 * description of the new link state, detailing speed and duplex
 * status
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{

        if (vptr->mii_status & VELOCITY_LINK_FAIL) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
        } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);

                if (vptr->mii_status & VELOCITY_SPEED_1000)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
                else if (vptr->mii_status & VELOCITY_SPEED_100)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");

                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
                else
                        VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
                switch (vptr->options.spd_dpx) {
                case SPD_DPX_100_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
                        break;
                case SPD_DPX_100_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
                        break;
                case SPD_DPX_10_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
                        break;
                case SPD_DPX_10_FULL:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
                        break;
                default:
                        break;
                }
        }
}

/**
 * enable_flow_control_ability - flow control
 * @vptr: velocity to configure
 *
 * Set up flow control according to the flow control options
 * determined by the eeprom/configuration.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{

        struct mac_regs __iomem *regs = vptr->mac_regs;

        switch (vptr->options.flow_cntl) {

        case FLOW_CNTL_DEFAULT:
                if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
                        writel(CR0_FDXRFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXRFCEN, &regs->CR0Clr);

                if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
                        writel(CR0_FDXTFCEN, &regs->CR0Set);
                else
                        writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_RX:
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        case FLOW_CNTL_TX_RX:
                writel(CR0_FDXTFCEN, &regs->CR0Set);
                writel(CR0_FDXRFCEN, &regs->CR0Set);
                break;

        case FLOW_CNTL_DISABLE:
                writel(CR0_FDXRFCEN, &regs->CR0Clr);
                writel(CR0_FDXTFCEN, &regs->CR0Clr);
                break;

        default:
                break;
        }

}

/**
 * velocity_soft_reset - soft reset
 * @vptr: velocity to reset
 *
 * Kick off a soft reset of the velocity adapter and then poll
 * until the reset sequence has completed before returning.
 */
static int velocity_soft_reset(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i = 0;

        writel(CR0_SFRST, &regs->CR0Set);

        for (i = 0; i < W_MAX_TIMEOUT; i++) {
                udelay(5);
                if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
                        break;
        }

        if (i == W_MAX_TIMEOUT) {
                writel(CR0_FORSRST, &regs->CR0Set);
                /* FIXME: PCI POSTING */
                /* delay 2ms */
                mdelay(2);
        }
        return 0;
}

/**
 * velocity_set_multi - filter list change callback
 * @dev: network device
 *
 * Called by the network layer when the filter lists need to change
 * for a velocity adapter. Reload the CAMs with the new address
 * filter ruleset.
 */
static void velocity_set_multi(struct net_device *dev)
{
        struct velocity_info *vptr = netdev_priv(dev);
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u8 rx_mode;
        int i;
        struct dev_mc_list *mclist;

        if (dev->flags & IFF_PROMISC) {        /* Set promiscuous. */
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
        } else if ((dev->mc_count > vptr->multicast_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB);
        } else {
                int offset = MCAM_SIZE - vptr->multicast_limit;
                mac_get_cam_mask(regs, vptr->mCAMmask);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
                        mac_set_cam(regs, i + offset, mclist->dmi_addr);
                        vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
                }

                mac_set_cam_mask(regs, vptr->mCAMmask);
                rx_mode = RCR_AM | RCR_AB | RCR_AP;
        }
        if (dev->mtu > 1500)
                rx_mode |= RCR_AL;

        BYTE_REG_BITS_ON(rx_mode, &regs->RCR);

}

/*
 *      MII access, media link mode setting functions
 */

/**
 * mii_init - set up MII
 * @vptr: velocity adapter
 * @mii_status: link status
 *
 * Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
        u16 BMCR;

        switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
        case PHYID_CICADA_CS8201:
                /*
                 * Reset to hardware default
                 */
                MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
                /*
                 * Turn on the ECHODIS bit in NWay-forced full mode and turn it
                 * off in NWay-forced half mode, for the NWay-forced vs.
                 * legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
                /*
                 * Turn on Link/Activity LED enable bit for CIS8201
                 */
                MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
                break;
        case PHYID_VT3216_32BIT:
        case PHYID_VT3216_64BIT:
                /*
                 * Reset to hardware default
                 */
                MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
                /*
                 * Turn on the ECHODIS bit in NWay-forced full mode and turn it
                 * off in NWay-forced half mode, for the NWay-forced vs.
                 * legacy-forced issue
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
                        MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
                else
                        MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
                break;

        case PHYID_MARVELL_1000:
        case PHYID_MARVELL_1000S:
                /*
                 * Assert CRS on Transmit
                 */
                MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
                /*
                 * Reset to hardware default
                 */
                MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
                break;
        default:
                ;
        }
        velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
        if (BMCR & BMCR_ISO) {
                BMCR &= ~BMCR_ISO;
                velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
        }
}

/**
 * setup_queue_timers - Setup interrupt timers
 * @vptr: velocity adapter
 *
 * Setup interrupt frequency during suppression (timeout if the frame
 * count isn't filled).
 */
static void setup_queue_timers(struct velocity_info *vptr)
{
        /* Only for newer revisions */
        if (vptr->rev_id >= REV_ID_VT3216_A0) {
                u8 txqueue_timer = 0;
                u8 rxqueue_timer = 0;

                if (vptr->mii_status & (VELOCITY_SPEED_1000 |
                                        VELOCITY_SPEED_100)) {
                        txqueue_timer = vptr->options.txqueue_timer;
                        rxqueue_timer = vptr->options.rxqueue_timer;
                }

                writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
                writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
        }
}
/**
 * setup_adaptive_interrupts - Setup interrupt suppression
 * @vptr: velocity adapter
 *
 * The velocity is able to suppress interrupts during high interrupt load.
 * This function turns on that feature.
 */
static void setup_adaptive_interrupts(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u16 tx_intsup = vptr->options.tx_intsup;
        u16 rx_intsup = vptr->options.rx_intsup;

        /* Setup default interrupt mask (will be changed below) */
        vptr->int_mask = INT_MASK_DEF;

        /* Set Tx Interrupt Suppression Threshold */
        writeb(CAMCR_PS0, &regs->CAMCR);
        if (tx_intsup != 0) {
                vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
                                    ISR_PTX2I | ISR_PTX3I);
                writew(tx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_TSUPDIS, &regs->ISRCTL);

        /* Set Rx Interrupt Suppression Threshold */
        writeb(CAMCR_PS1, &regs->CAMCR);
        if (rx_intsup != 0) {
                vptr->int_mask &= ~ISR_PRXI;
                writew(rx_intsup, &regs->ISRCTL);
        } else
                writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

        /* Select page to interrupt hold timer */
        writeb(0, &regs->CAMCR);
}

/**
 * velocity_init_registers - initialise MAC registers
 * @vptr: velocity to init
 * @type: type of initialisation (hot or cold)
 *
 * Initialise the MAC on a reset or on first set up on the
 * hardware.
 */
static void velocity_init_registers(struct velocity_info *vptr,
                                    enum velocity_init_type type)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int i, mii_status;

        mac_wol_reset(regs);

        switch (type) {
        case VELOCITY_INIT_RESET:
        case VELOCITY_INIT_WOL:

                netif_stop_queue(vptr->dev);

                /*
                 * Reset RX to prevent the RX pointer from ending up off
                 * the 4X (multiple of four descriptors) boundary
                 */
                velocity_rx_reset(vptr);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                mii_status = velocity_get_opt_media_mode(vptr);
                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);

                mac_clear_isr(regs);
                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
                       &regs->CR0Set);

                break;

        case VELOCITY_INIT_COLD:
        default:
                /*
                 * Do reset
                 */
                velocity_soft_reset(vptr);
                mdelay(5);

                mac_eeprom_reload(regs);
                for (i = 0; i < 6; i++)
                        writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));

                /*
                 * clear Pre_ACPI bit.
                 */
                BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
                mac_set_rx_thresh(regs, vptr->options.rx_thresh);
                mac_set_dma_length(regs, vptr->options.DMA_length);

                writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
                /*
                 * Back off algorithm use original IEEE standard
                 */
                BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

                /*
                 * Init CAM filter
                 */
                velocity_init_cam_filter(vptr);

                /*
                 * Set packet filter: Receive directed and broadcast address
                 */
                velocity_set_multi(vptr->dev);

                /*
                 * Enable MII auto-polling
                 */
                enable_mii_autopoll(regs);

                setup_adaptive_interrupts(vptr);

                writel(vptr->rx.pool_dma, &regs->RDBaseLo);
                writew(vptr->options.numrx - 1, &regs->RDCSize);
                mac_rx_queue_run(regs);
                mac_rx_queue_wake(regs);

                writew(vptr->options.numtx - 1, &regs->TDCSize);

                for (i = 0; i < vptr->tx.numq; i++) {
                        writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
                        mac_tx_queue_run(regs, i);
                }

                init_flow_control_register(vptr);

                writel(CR0_STOP, &regs->CR0Clr);
                writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

                mii_status = velocity_get_opt_media_mode(vptr);
                netif_stop_queue(vptr->dev);

                mii_init(vptr, mii_status);

                if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
                        velocity_print_link_status(vptr);
                        if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
                                netif_wake_queue(vptr->dev);
                }

                enable_flow_control_ability(vptr);
                mac_hw_mibs_init(regs);
                mac_write_int_mask(vptr->int_mask, regs);
                mac_clear_isr(regs);

        }
}

static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
        struct mac_regs __iomem *regs = vptr->mac_regs;
        int avail, dirty, unusable;

        /*
         * RD number must be equal to 4X per hardware spec
         * (programming guide rev 1.20, p.13)
         */
        if (vptr->rx.filled < 4)
                return;

        wmb();

        unusable = vptr->rx.filled & 0x0003;
        dirty = vptr->rx.dirty - unusable;
        for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
                dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
                vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
        }

        writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
        vptr->rx.filled = unusable;
}

/**
 * velocity_init_dma_rings - set up DMA rings
 * @vptr: Velocity to set up
 *
 * Allocate PCI mapped DMA rings for the receive and transmit layer
 * to use.
 */
static int velocity_init_dma_rings(struct velocity_info *vptr)
{
        struct velocity_opt *opt = &vptr->options;
        const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
        const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
        struct pci_dev *pdev = vptr->pdev;
        dma_addr_t pool_dma;
        void *pool;
        unsigned int i;

        /*
         * Allocate all RD/TD rings in a single pool.
         *
         * pci_alloc_consistent() fulfills the requirement for 64 byte
         * alignment.
         */
        pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
                                    rx_ring_size, &pool_dma);
        if (!pool) {
                dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
                        vptr->dev->name);
                return -ENOMEM;
        }

        vptr->rx.ring = pool;
        vptr->rx.pool_dma = pool_dma;

        pool += rx_ring_size;
        pool_dma += rx_ring_size;

        for (i = 0; i < vptr->tx.numq; i++) {
                vptr->tx.rings[i] = pool;
                vptr->tx.pool_dma[i] = pool_dma;
                pool += tx_ring_size;
                pool_dma += tx_ring_size;
        }

        return 0;
}

static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
{
        vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
}

/**
 * velocity_alloc_rx_buf - allocate aligned receive buffer
 * @vptr: velocity
 * @idx: ring index
 *
 * Allocate a new full sized buffer for the reception of a frame and
 * map it into PCI space for the hardware to use. The hardware
 * requires *64* byte alignment of the buffer which makes life
 * less fun than would be ideal.
 */
static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
{
        struct rx_desc *rd = &(vptr->rx.ring[idx]);
        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);

        rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
        if (rd_info->skb == NULL)
                return -ENOMEM;

        /*
         * Do the gymnastics to get the buffer head for data at
         * 64byte alignment.
         */
        skb_reserve(rd_info->skb,
                    64 - ((unsigned long) rd_info->skb->data & 63));
        rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
                                          vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);

        /*
         * Fill in the descriptor to match
         */

        *((u32 *) &(rd->rdesc0)) = 0;
        rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
        rd->pa_low = cpu_to_le32(rd_info->skb_dma);
        rd->pa_high = 0;
        return 0;
}


static int velocity_rx_refill(struct velocity_info *vptr)
{
        int dirty = vptr->rx.dirty, done = 0;

        do {
                struct rx_desc *rd = vptr->rx.ring + dirty;

                /* Fine for an all zero Rx desc at init time as well */
                if (rd->rdesc0.len & OWNED_BY_NIC)
                        break;

                if (!vptr->rx.info[dirty].skb) {
                        if (velocity_alloc_rx_buf(vptr, dirty) < 0)
                                break;
                }
                done++;
                dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
        } while (dirty != vptr->rx.curr);

        if (done) {
                vptr->rx.dirty = dirty;
                vptr->rx.filled += done;
        }

        return done;
}

/**
 * velocity_free_rd_ring - free receive ring
 * @vptr: velocity to clean up
 *
 * Free the receive buffers for each ring slot and any
 * attached socket buffers that need to go away.
 */
static void velocity_free_rd_ring(struct velocity_info *vptr)
{
        int i;

        if (vptr->rx.info == NULL)
                return;

        for (i = 0; i < vptr->options.numrx; i++) {
                struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
                struct rx_desc *rd = vptr->rx.ring + i;

                memset(rd, 0, sizeof(*rd));

                if (!rd_info->skb)
                        continue;
                pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
                                 PCI_DMA_FROMDEVICE);
                rd_info->skb_dma = 0;

                dev_kfree_skb(rd_info->skb);
                rd_info->skb = NULL;
        }

        kfree(vptr->rx.info);
        vptr->rx.info = NULL;
}


1596 | /** | |
1597 | * velocity_init_rd_ring - set up receive ring | |
1598 | * @vptr: velocity to configure | |
1599 | * | |
1600 | * Allocate and set up the receive buffers for each ring slot and | |
1601 | * assign them to the network adapter. | |
1602 | */ | |
1603 | static int velocity_init_rd_ring(struct velocity_info *vptr) | |
1604 | { | |
1605 | int ret = -ENOMEM; | |
1606 | ||
1607 | vptr->rx.info = kcalloc(vptr->options.numrx, | |
1608 | sizeof(struct velocity_rd_info), GFP_KERNEL); | |
1609 | if (!vptr->rx.info) | |
1610 | goto out; | |
1611 | ||
1612 | velocity_init_rx_ring_indexes(vptr); | |
1613 | ||
1614 | if (velocity_rx_refill(vptr) != vptr->options.numrx) { | |
1615 | VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR | |
1616 | "%s: failed to allocate RX buffer.\n", vptr->dev->name); | |
1617 | velocity_free_rd_ring(vptr); | |
1618 | goto out; | |
1619 | } | |
1620 | ||
1621 | ret = 0; | |
1622 | out: | |
1623 | return ret; | |
1624 | } | |
1625 | ||
1626 | /** | |
1627 | * velocity_init_td_ring - set up transmit ring | |
1628 | * @vptr: velocity | |
1629 | * | |
1630 | * Set up the transmit ring and chain the ring pointers together. | |
1631 | * Returns zero on success or a negative posix errno code for | |
1632 | * failure. | |
1633 | */ | |
1634 | static int velocity_init_td_ring(struct velocity_info *vptr) | |
1635 | { | |
1636 | int j; | |
1637 | ||
1638 | /* Init the TD ring entries */ | |
1639 | for (j = 0; j < vptr->tx.numq; j++) { | |
1640 | ||
1641 | vptr->tx.infos[j] = kcalloc(vptr->options.numtx, | |
1642 | sizeof(struct velocity_td_info), | |
1643 | GFP_KERNEL); | |
1644 | if (!vptr->tx.infos[j]) { | |
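| /* Unwind the queues that were already allocated */ | |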
1645 | while (--j >= 0) | |
1646 | kfree(vptr->tx.infos[j]); | |
1647 | return -ENOMEM; | |
1648 | } | |
1649 | ||
1650 | vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0; | |
1651 | } | |
1652 | return 0; | |
1653 | } | |
1654 | ||
1655 | /** | |
1656 | * velocity_free_dma_rings - free PCI ring pointers | |
1657 | * @vptr: Velocity to free from | |
1658 | * | |
1659 | * Clean up the PCI ring buffers allocated to this velocity. | |
1660 | */ | |
1661 | static void velocity_free_dma_rings(struct velocity_info *vptr) | |
1662 | { | |
1663 | const int size = vptr->options.numrx * sizeof(struct rx_desc) + | |
1664 | vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; | |
1665 | ||
1666 | pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma); | |
1667 | } | |
1668 | ||
1669 | ||
1670 | static int velocity_init_rings(struct velocity_info *vptr, int mtu) | |
1671 | { | |
1672 | int ret; | |
1673 | ||
1674 | velocity_set_rxbufsize(vptr, mtu); | |
1675 | ||
1676 | ret = velocity_init_dma_rings(vptr); | |
1677 | if (ret < 0) | |
1678 | goto out; | |
1679 | ||
1680 | ret = velocity_init_rd_ring(vptr); | |
1681 | if (ret < 0) | |
1682 | goto err_free_dma_rings_0; | |
1683 | ||
1684 | ret = velocity_init_td_ring(vptr); | |
1685 | if (ret < 0) | |
1686 | goto err_free_rd_ring_1; | |
1687 | out: | |
1688 | return ret; | |
1689 | ||
1690 | err_free_rd_ring_1: | |
1691 | velocity_free_rd_ring(vptr); | |
1692 | err_free_dma_rings_0: | |
1693 | velocity_free_dma_rings(vptr); | |
1694 | goto out; | |
1695 | } | |
1696 | ||
1697 | /** | |
1698 | * velocity_free_tx_buf - free transmit buffer | |
1699 | * @vptr: velocity | |
1700 | * @tdinfo: buffer | |
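| * @td: transmit descriptor whose buffers are being released | |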
1701 | * | |
1702 | * Release a transmit buffer. If the buffer was preallocated then | |
1703 | * recycle it, if not then unmap the buffer. | |
1704 | */ | |
1705 | static void velocity_free_tx_buf(struct velocity_info *vptr, | |
1706 | struct velocity_td_info *tdinfo, struct tx_desc *td) | |
1707 | { | |
1708 | struct sk_buff *skb = tdinfo->skb; | |
1709 | ||
1710 | /* | |
1711 | * Don't unmap the pre-allocated tx_bufs | |
1712 | */ | |
1713 | if (tdinfo->skb_dma) { | |
1714 | int i; | |
1715 | ||
1716 | for (i = 0; i < tdinfo->nskb_dma; i++) { | |
1717 | size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN); | |
1718 | ||
1719 | /* For scatter-gather */ | |
1720 | if (skb_shinfo(skb)->nr_frags > 0) | |
1721 | pktlen = max_t(size_t, pktlen, | |
1722 | td->td_buf[i].size & ~TD_QUEUE); | |
1723 | ||
1724 | pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], | |
1725 | le16_to_cpu(pktlen), PCI_DMA_TODEVICE); | |
1726 | } | |
1727 | } | |
1728 | dev_kfree_skb_irq(skb); | |
1729 | tdinfo->skb = NULL; | |
1730 | } | |
1731 | ||
1732 | ||
1733 | /* | |
1734 | * FIXME: could we merge this with velocity_free_tx_buf ? | |
1735 | */ | |
1736 | static void velocity_free_td_ring_entry(struct velocity_info *vptr, | |
1737 | int q, int n) | |
1738 | { | |
1739 | struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]); | |
1740 | int i; | |
1741 | ||
1742 | if (td_info == NULL) | |
1743 | return; | |
1744 | ||
1745 | if (td_info->skb) { | |
1746 | for (i = 0; i < td_info->nskb_dma; i++) { | |
1747 | if (td_info->skb_dma[i]) { | |
1748 | pci_unmap_single(vptr->pdev, td_info->skb_dma[i], | |
1749 | td_info->skb->len, PCI_DMA_TODEVICE); | |
1750 | td_info->skb_dma[i] = 0; | |
1751 | } | |
1752 | } | |
1753 | dev_kfree_skb(td_info->skb); | |
1754 | td_info->skb = NULL; | |
1755 | } | |
1756 | } | |
1757 | ||
1758 | /** | |
1759 | * velocity_free_td_ring - free td ring | |
1760 | * @vptr: velocity | |
1761 | * | |
1762 | * Free up the transmit ring for this particular velocity adapter. | |
1763 | * We free the ring contents but not the ring itself. | |
1764 | */ | |
1765 | static void velocity_free_td_ring(struct velocity_info *vptr) | |
1766 | { | |
1767 | int i, j; | |
1768 | ||
1769 | for (j = 0; j < vptr->tx.numq; j++) { | |
1770 | if (vptr->tx.infos[j] == NULL) | |
1771 | continue; | |
1772 | for (i = 0; i < vptr->options.numtx; i++) | |
1773 | velocity_free_td_ring_entry(vptr, j, i); | |
1774 | ||
1775 | kfree(vptr->tx.infos[j]); | |
1776 | vptr->tx.infos[j] = NULL; | |
1777 | } | |
1778 | } | |
1779 | ||
1780 | ||
1781 | static void velocity_free_rings(struct velocity_info *vptr) | |
1782 | { | |
1783 | velocity_free_td_ring(vptr); | |
1784 | velocity_free_rd_ring(vptr); | |
1785 | velocity_free_dma_rings(vptr); | |
1786 | } | |
1787 | ||
1788 | /** | |
1789 | * velocity_error - handle error from controller | |
1790 | * @vptr: velocity | |
1791 | * @status: card status | |
1792 | * | |
1793 | * Process an error report from the hardware and attempt to recover | |
1794 | * the card itself. At the moment we cannot recover from some | |
1795 | * theoretically impossible errors, but this could be fixed using | |
1796 | * the pci_device_failed logic to bounce the hardware. | |
1797 | * | |
1798 | */ | |
1799 | static void velocity_error(struct velocity_info *vptr, int status) | |
1800 | { | |
1801 | ||
1802 | if (status & ISR_TXSTLI) { | |
1803 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
1804 | ||
1805 | printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(®s->TDIdx[0])); | |
1806 | BYTE_REG_BITS_ON(TXESR_TDSTR, ®s->TXESR); | |
1807 | writew(TRDCSR_RUN, ®s->TDCSRClr); | |
1808 | netif_stop_queue(vptr->dev); | |
1809 | ||
1810 | /* FIXME: port over the pci_device_failed code and use it | |
1811 | here */ | |
1812 | } | |
1813 | ||
1814 | if (status & ISR_SRCI) { | |
1815 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
1816 | int linked; | |
1817 | ||
1818 | if (vptr->options.spd_dpx == SPD_DPX_AUTO) { | |
1819 | vptr->mii_status = check_connection_type(regs); | |
1820 | ||
1821 | /* | |
1822 | * If it is a 3119, disable frame bursting in | |
1823 | * halfduplex mode and enable it in fullduplex | |
1824 | * mode | |
1825 | */ | |
1826 | if (vptr->rev_id < REV_ID_VT3216_A0) { | |
1827 | if (vptr->mii_status & VELOCITY_DUPLEX_FULL) | |
1828 | BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); | |
1829 | else | |
1830 | BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); | |
1831 | } | |
1832 | /* | |
1833 | * Only enable CD heart beat counter in 10HD mode | |
1834 | */ | |
1835 | if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) | |
1836 | BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); | |
1837 | else | |
1838 | BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG); | |
1839 | ||
1840 | setup_queue_timers(vptr); | |
1841 | } | |
1842 | /* | |
1843 | * Get link status from PHYSR0 | |
1844 | */ | |
1845 | linked = readb(®s->PHYSR0) & PHYSR0_LINKGD; | |
1846 | ||
1847 | if (linked) { | |
1848 | vptr->mii_status &= ~VELOCITY_LINK_FAIL; | |
1849 | netif_carrier_on(vptr->dev); | |
1850 | } else { | |
1851 | vptr->mii_status |= VELOCITY_LINK_FAIL; | |
1852 | netif_carrier_off(vptr->dev); | |
1853 | } | |
1854 | ||
1855 | velocity_print_link_status(vptr); | |
1856 | enable_flow_control_ability(vptr); | |
1857 | ||
1858 | /* | |
1859 | * Re-enable auto-polling because SRCI will disable | |
1860 | * auto-polling | |
1861 | */ | |
1862 | ||
1863 | enable_mii_autopoll(regs); | |
1864 | ||
1865 | if (vptr->mii_status & VELOCITY_LINK_FAIL) | |
1866 | netif_stop_queue(vptr->dev); | |
1867 | else | |
1868 | netif_wake_queue(vptr->dev); | |
1869 | ||
1870 | } | |
1871 | if (status & ISR_MIBFI) | |
1872 | velocity_update_hw_mibs(vptr); | |
1873 | if (status & ISR_LSTEI) | |
1874 | mac_rx_queue_wake(vptr->mac_regs); | |
1875 | } | |
1876 | ||
1877 | /** | |
1878 | * tx_srv - transmit interrupt service | |
1879 | * @vptr; Velocity | |
1880 | * @status: | |
1881 | * | |
1882 | * Scan the queues looking for transmitted packets that | |
1883 | * we can complete and clean up. Update any statistics as | |
1884 | * necessary/ | |
1885 | */ | |
1886 | static int velocity_tx_srv(struct velocity_info *vptr, u32 status) | |
1887 | { | |
1888 | struct tx_desc *td; | |
1889 | int qnum; | |
1890 | int full = 0; | |
1891 | int idx; | |
1892 | int works = 0; | |
1893 | struct velocity_td_info *tdinfo; | |
1894 | struct net_device_stats *stats = &vptr->dev->stats; | |
1895 | ||
1896 | for (qnum = 0; qnum < vptr->tx.numq; qnum++) { | |
1897 | for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; | |
1898 | idx = (idx + 1) % vptr->options.numtx) { | |
1899 | ||
1900 | /* | |
1901 | * Get Tx Descriptor | |
1902 | */ | |
1903 | td = &(vptr->tx.rings[qnum][idx]); | |
1904 | tdinfo = &(vptr->tx.infos[qnum][idx]); | |
1905 | ||
1906 | if (td->tdesc0.len & OWNED_BY_NIC) | |
1907 | break; | |
1908 | ||
1909 | if ((works++ > 15)) | |
1910 | break; | |
1911 | ||
1912 | if (td->tdesc0.TSR & TSR0_TERR) { | |
1913 | stats->tx_errors++; | |
1914 | stats->tx_dropped++; | |
1915 | if (td->tdesc0.TSR & TSR0_CDH) | |
1916 | stats->tx_heartbeat_errors++; | |
1917 | if (td->tdesc0.TSR & TSR0_CRS) | |
1918 | stats->tx_carrier_errors++; | |
1919 | if (td->tdesc0.TSR & TSR0_ABT) | |
1920 | stats->tx_aborted_errors++; | |
1921 | if (td->tdesc0.TSR & TSR0_OWC) | |
1922 | stats->tx_window_errors++; | |
1923 | } else { | |
1924 | stats->tx_packets++; | |
1925 | stats->tx_bytes += tdinfo->skb->len; | |
1926 | } | |
1927 | velocity_free_tx_buf(vptr, tdinfo, td); | |
1928 | vptr->tx.used[qnum]--; | |
1929 | } | |
1930 | vptr->tx.tail[qnum] = idx; | |
1931 | ||
1932 | if (AVAIL_TD(vptr, qnum) < 1) | |
1933 | full = 1; | |
1934 | } | |
1935 | /* | |
1936 | * Look to see if we should kick the transmit network | |
1937 | * layer for more work. | |
1938 | */ | |
1939 | if (netif_queue_stopped(vptr->dev) && (full == 0) && | |
1940 | (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { | |
1941 | netif_wake_queue(vptr->dev); | |
1942 | } | |
1943 | return works; | |
1944 | } | |
1945 | ||
1946 | /** | |
1947 | * velocity_rx_csum - checksum process | |
1948 | * @rd: receive packet descriptor | |
1949 | * @skb: network layer packet buffer | |
1950 | * | |
1951 | * Process the status bits for the received packet and determine | |
1952 | * if the checksum was computed and verified by the hardware | |
1953 | */ | |
1954 | static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb) | |
1955 | { | |
1956 | skb->ip_summed = CHECKSUM_NONE; | |
1957 | ||
1958 | if (rd->rdesc1.CSM & CSM_IPKT) { | |
1959 | if (rd->rdesc1.CSM & CSM_IPOK) { | |
1960 | if ((rd->rdesc1.CSM & CSM_TCPKT) || | |
1961 | (rd->rdesc1.CSM & CSM_UDPKT)) { | |
1962 | if (!(rd->rdesc1.CSM & CSM_TUPOK)) | |
1963 | return; | |
1964 | } | |
1965 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1966 | } | |
1967 | } | |
1968 | } | |
1969 | ||
1970 | /** | |
1971 | * velocity_rx_copy - in place Rx copy for small packets | |
1972 | * @rx_skb: network layer packet buffer candidate | |
1973 | * @pkt_size: received data size | |
1974 | * @rd: receive packet descriptor | |
1975 | * @dev: network device | |
1976 | * | |
1977 | * Replace the current skb that is scheduled for Rx processing by a | |
1978 | * shorter, immediately allocated skb, if the received packet is small | |
1979 | * enough. This function returns a negative value if the received | |
1980 | * packet is too big or if memory is exhausted. | |
1981 | */ | |
1982 | static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, | |
1983 | struct velocity_info *vptr) | |
1984 | { | |
1985 | int ret = -1; | |
1986 | if (pkt_size < rx_copybreak) { | |
1987 | struct sk_buff *new_skb; | |
1988 | ||
1989 | new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size); | |
1990 | if (new_skb) { | |
1991 | new_skb->ip_summed = rx_skb[0]->ip_summed; | |
1992 | skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); | |
1993 | *rx_skb = new_skb; | |
1994 | ret = 0; | |
1995 | } | |
1996 | ||
1997 | } | |
1998 | return ret; | |
1999 | } | |
2000 | ||
2001 | /** | |
2002 | * velocity_iph_realign - IP header alignment | |
2003 | * @vptr: velocity we are handling | |
2004 | * @skb: network layer packet buffer | |
2005 | * @pkt_size: received data size | |
2006 | * | |
2007 | * Align the IP header on a 2-byte boundary. This behavior can be | |
2008 | * configured by the user. | |
2009 | */ | |
2010 | static inline void velocity_iph_realign(struct velocity_info *vptr, | |
2011 | struct sk_buff *skb, int pkt_size) | |
2012 | { | |
2013 | if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { | |
2014 | memmove(skb->data + 2, skb->data, pkt_size); | |
2015 | skb_reserve(skb, 2); | |
2016 | } | |
2017 | } | |
2018 | ||
2019 | ||
2020 | /** | |
2021 | * velocity_receive_frame - received packet processor | |
2022 | * @vptr: velocity we are handling | |
2023 | * @idx: ring index | |
2024 | * | |
2025 | * A packet has arrived. We process the packet and if appropriate | |
2026 | * pass the frame up the network stack | |
2027 | */ | |
2028 | static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |
2029 | { | |
2030 | void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); | |
2031 | struct net_device_stats *stats = &vptr->dev->stats; | |
2032 | struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); | |
2033 | struct rx_desc *rd = &(vptr->rx.ring[idx]); | |
2034 | int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; | |
2035 | struct sk_buff *skb; | |
2036 | ||
2037 | if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { | |
2038 | VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name); | |
2039 | stats->rx_length_errors++; | |
2040 | return -EINVAL; | |
2041 | } | |
2042 | ||
2043 | if (rd->rdesc0.RSR & RSR_MAR) | |
2044 | stats->multicast++; | |
2045 | ||
2046 | skb = rd_info->skb; | |
2047 | ||
2048 | pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, | |
2049 | vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); | |
2050 | ||
2051 | /* | |
2052 | * Drop frame not meeting IEEE 802.3 | |
2053 | */ | |
2054 | ||
2055 | if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) { | |
2056 | if (rd->rdesc0.RSR & RSR_RL) { | |
2057 | stats->rx_length_errors++; | |
2058 | return -EINVAL; | |
2059 | } | |
2060 | } | |
2061 | ||
2062 | pci_action = pci_dma_sync_single_for_device; | |
2063 | ||
2064 | velocity_rx_csum(rd, skb); | |
2065 | ||
2066 | if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { | |
2067 | velocity_iph_realign(vptr, skb, pkt_len); | |
2068 | pci_action = pci_unmap_single; | |
2069 | rd_info->skb = NULL; | |
2070 | } | |
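| /* | |
| * If the frame was copied, the original receive buffer stays | |
| * mapped and is merely synced back to the device for reuse; | |
| * otherwise the full sized skb goes up the stack, so it is | |
| * unmapped here and the empty ring slot is left for | |
| * velocity_rx_refill() to repopulate. | |
| */ | |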
2071 | ||
2072 | pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, | |
2073 | PCI_DMA_FROMDEVICE); | |
2074 | ||
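| /* | |
| * The descriptor length includes the 4 byte frame checksum, | |
| * which is trimmed before the skb is handed up the stack. | |
| */ | |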
2075 | skb_put(skb, pkt_len - 4); | |
2076 | skb->protocol = eth_type_trans(skb, vptr->dev); | |
2077 | ||
2078 | if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) { | |
2079 | vlan_hwaccel_rx(skb, vptr->vlgrp, | |
2080 | swab16(le16_to_cpu(rd->rdesc1.PQTAG))); | |
2081 | } else | |
2082 | netif_rx(skb); | |
2083 | ||
2084 | stats->rx_bytes += pkt_len; | |
2085 | ||
2086 | return 0; | |
2087 | } | |
2088 | ||
2089 | ||
2090 | /** | |
2091 | * velocity_rx_srv - service RX interrupt | |
2092 | * @vptr: velocity | |
2093 | * @status: adapter status (unused) | |
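| * @budget_left: maximum number of descriptors to service in this call | |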
2094 | * | |
2095 | * Walk the receive ring of the velocity adapter and remove | |
2096 | * any received packets from the receive queue. Hand the ring | |
2097 | * slots back to the adapter for reuse. | |
2098 | */ | |
2099 | static int velocity_rx_srv(struct velocity_info *vptr, int status, | |
2100 | int budget_left) | |
2101 | { | |
2102 | struct net_device_stats *stats = &vptr->dev->stats; | |
2103 | int rd_curr = vptr->rx.curr; | |
2104 | int works = 0; | |
2105 | ||
2106 | while (works < budget_left) { | |
2107 | struct rx_desc *rd = vptr->rx.ring + rd_curr; | |
2108 | ||
2109 | if (!vptr->rx.info[rd_curr].skb) | |
2110 | break; | |
2111 | ||
2112 | if (rd->rdesc0.len & OWNED_BY_NIC) | |
2113 | break; | |
2114 | ||
2115 | rmb(); | |
2116 | ||
2117 | /* | |
2118 | * Don't drop CE or RL error frame although RXOK is off | |
2119 | */ | |
2120 | if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) { | |
2121 | if (velocity_receive_frame(vptr, rd_curr) < 0) | |
2122 | stats->rx_dropped++; | |
2123 | } else { | |
2124 | if (rd->rdesc0.RSR & RSR_CRC) | |
2125 | stats->rx_crc_errors++; | |
2126 | if (rd->rdesc0.RSR & RSR_FAE) | |
2127 | stats->rx_frame_errors++; | |
2128 | ||
2129 | stats->rx_dropped++; | |
2130 | } | |
2131 | ||
2132 | rd->size |= RX_INTEN; | |
2133 | ||
2134 | rd_curr++; | |
2135 | if (rd_curr >= vptr->options.numrx) | |
2136 | rd_curr = 0; | |
2137 | works++; | |
2138 | } | |
2139 | ||
2140 | vptr->rx.curr = rd_curr; | |
2141 | ||
2142 | if ((works > 0) && (velocity_rx_refill(vptr) > 0)) | |
2143 | velocity_give_many_rx_descs(vptr); | |
2144 | ||
2145 | VAR_USED(stats); | |
2146 | return works; | |
2147 | } | |
2148 | ||
2149 | static int velocity_poll(struct napi_struct *napi, int budget) | |
2150 | { | |
2151 | struct velocity_info *vptr = container_of(napi, | |
2152 | struct velocity_info, napi); | |
2153 | unsigned int rx_done; | |
2154 | u32 isr_status; | |
2155 | ||
2156 | spin_lock(&vptr->lock); | |
2157 | isr_status = mac_read_isr(vptr->mac_regs); | |
2158 | ||
2159 | /* Ack the interrupt */ | |
2160 | mac_write_isr(vptr->mac_regs, isr_status); | |
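| /* | |
| * Any status bit other than the normal rx/tx completion | |
| * interrupts is handed to the error path. | |
| */ | |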
2161 | if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) | |
2162 | velocity_error(vptr, isr_status); | |
2163 | ||
2164 | /* | |
2165 | * Do rx and tx twice for performance (taken from the VIA | |
2166 | * out-of-tree driver). | |
2167 | */ | |
2168 | rx_done = velocity_rx_srv(vptr, isr_status, budget / 2); | |
2169 | velocity_tx_srv(vptr, isr_status); | |
2170 | rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done); | |
2171 | velocity_tx_srv(vptr, isr_status); | |
2172 | ||
2173 | spin_unlock(&vptr->lock); | |
2174 | ||
2175 | /* If budget not fully consumed, exit the polling mode */ | |
2176 | if (rx_done < budget) { | |
2177 | napi_complete(napi); | |
2178 | mac_enable_int(vptr->mac_regs); | |
2179 | } | |
2180 | ||
2181 | return rx_done; | |
2182 | } | |
2183 | ||
2184 | /** | |
2185 | * velocity_intr - interrupt callback | |
2186 | * @irq: interrupt number | |
2187 | * @dev_instance: interrupting device | |
2188 | * | |
2189 | * Called whenever an interrupt is generated by the velocity | |
2190 | * adapter IRQ line. We may not be the source of the interrupt | |
2191 | * so we first check whether it is ours and, if not, exit as | |
2192 | * efficiently as possible. | |
2193 | */ | |
2194 | static irqreturn_t velocity_intr(int irq, void *dev_instance) | |
2195 | { | |
2196 | struct net_device *dev = dev_instance; | |
2197 | struct velocity_info *vptr = netdev_priv(dev); | |
2198 | u32 isr_status; | |
2199 | ||
2200 | spin_lock(&vptr->lock); | |
2201 | isr_status = mac_read_isr(vptr->mac_regs); | |
2202 | ||
2203 | /* Not us? */ | |
2204 | if (isr_status == 0) { | |
2205 | spin_unlock(&vptr->lock); | |
2206 | return IRQ_NONE; | |
2207 | } | |
2208 | ||
2209 | if (likely(napi_schedule_prep(&vptr->napi))) { | |
2210 | mac_disable_int(vptr->mac_regs); | |
2211 | __napi_schedule(&vptr->napi); | |
2212 | } | |
2213 | spin_unlock(&vptr->lock); | |
2214 | ||
2215 | return IRQ_HANDLED; | |
2216 | } | |
2217 | ||
2218 | /** | |
2219 | * velocity_open - interface activation callback | |
2220 | * @dev: network layer device to open | |
2221 | * | |
2222 | * Called when the network layer brings the interface up. Returns | |
2223 | * a negative posix error code on failure, or zero on success. | |
2224 | * | |
2225 | * All the ring allocation and set up is done on open for this | |
2226 | * adapter to minimise memory usage when inactive | |
2227 | */ | |
2228 | static int velocity_open(struct net_device *dev) | |
2229 | { | |
2230 | struct velocity_info *vptr = netdev_priv(dev); | |
2231 | int ret; | |
2232 | ||
2233 | ret = velocity_init_rings(vptr, dev->mtu); | |
2234 | if (ret < 0) | |
2235 | goto out; | |
2236 | ||
2237 | /* Ensure chip is running */ | |
2238 | pci_set_power_state(vptr->pdev, PCI_D0); | |
2239 | ||
2240 | velocity_give_many_rx_descs(vptr); | |
2241 | ||
2242 | velocity_init_registers(vptr, VELOCITY_INIT_COLD); | |
2243 | ||
2244 | ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED, | |
2245 | dev->name, dev); | |
2246 | if (ret < 0) { | |
2247 | /* Power down the chip */ | |
2248 | pci_set_power_state(vptr->pdev, PCI_D3hot); | |
2249 | velocity_free_rings(vptr); | |
2250 | goto out; | |
2251 | } | |
2252 | ||
2253 | mac_enable_int(vptr->mac_regs); | |
2254 | netif_start_queue(dev); | |
2255 | napi_enable(&vptr->napi); | |
2256 | vptr->flags |= VELOCITY_FLAGS_OPENED; | |
2257 | out: | |
2258 | return ret; | |
2259 | } | |
2260 | ||
2261 | /** | |
2262 | * velocity_shutdown - shut down the chip | |
2263 | * @vptr: velocity to deactivate | |
2264 | * | |
2265 | * Shuts down the internal operations of the velocity and | |
2266 | * disables interrupts, autopolling, transmit and receive | |
2267 | */ | |
2268 | static void velocity_shutdown(struct velocity_info *vptr) | |
2269 | { | |
2270 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
2271 | mac_disable_int(regs); | |
2272 | writel(CR0_STOP, ®s->CR0Set); | |
2273 | writew(0xFFFF, ®s->TDCSRClr); | |
2274 | writeb(0xFF, ®s->RDCSRClr); | |
2275 | safe_disable_mii_autopoll(regs); | |
2276 | mac_clear_isr(regs); | |
2277 | } | |
2278 | ||
2279 | /** | |
2280 | * velocity_change_mtu - MTU change callback | |
2281 | * @dev: network device | |
2282 | * @new_mtu: desired MTU | |
2283 | * | |
2284 | * Handle requests from the networking layer for MTU change on | |
2285 | * this interface. It gets called on a change by the network layer. | |
2286 | * Return zero for success or negative posix error code. | |
2287 | */ | |
2288 | static int velocity_change_mtu(struct net_device *dev, int new_mtu) | |
2289 | { | |
2290 | struct velocity_info *vptr = netdev_priv(dev); | |
2291 | int ret = 0; | |
2292 | ||
2293 | if ((new_mtu < VELOCITY_MIN_MTU) || (new_mtu > VELOCITY_MAX_MTU)) { | |
2294 | VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", | |
2295 | vptr->dev->name); | |
2296 | ret = -EINVAL; | |
2297 | goto out_0; | |
2298 | } | |
2299 | ||
2300 | if (!netif_running(dev)) { | |
2301 | dev->mtu = new_mtu; | |
2302 | goto out_0; | |
2303 | } | |
2304 | ||
2305 | if (dev->mtu != new_mtu) { | |
2306 | struct velocity_info *tmp_vptr; | |
2307 | unsigned long flags; | |
2308 | struct rx_info rx; | |
2309 | struct tx_info tx; | |
2310 | ||
2311 | tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL); | |
2312 | if (!tmp_vptr) { | |
2313 | ret = -ENOMEM; | |
2314 | goto out_0; | |
2315 | } | |
2316 | ||
2317 | tmp_vptr->dev = dev; | |
2318 | tmp_vptr->pdev = vptr->pdev; | |
2319 | tmp_vptr->options = vptr->options; | |
2320 | tmp_vptr->tx.numq = vptr->tx.numq; | |
2321 | ||
2322 | ret = velocity_init_rings(tmp_vptr, new_mtu); | |
2323 | if (ret < 0) | |
2324 | goto out_free_tmp_vptr_1; | |
2325 | ||
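| /* | |
| * The replacement rings were built above without touching the | |
| * live device; swap them in under the lock so the interface is | |
| * only quiesced for the duration of the exchange, then free the | |
| * old rings once the lock has been dropped. | |
| */ | |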
2326 | spin_lock_irqsave(&vptr->lock, flags); | |
2327 | ||
2328 | netif_stop_queue(dev); | |
2329 | velocity_shutdown(vptr); | |
2330 | ||
2331 | rx = vptr->rx; | |
2332 | tx = vptr->tx; | |
2333 | ||
2334 | vptr->rx = tmp_vptr->rx; | |
2335 | vptr->tx = tmp_vptr->tx; | |
2336 | ||
2337 | tmp_vptr->rx = rx; | |
2338 | tmp_vptr->tx = tx; | |
2339 | ||
2340 | dev->mtu = new_mtu; | |
2341 | ||
2342 | velocity_give_many_rx_descs(vptr); | |
2343 | ||
2344 | velocity_init_registers(vptr, VELOCITY_INIT_COLD); | |
2345 | ||
2346 | mac_enable_int(vptr->mac_regs); | |
2347 | netif_start_queue(dev); | |
2348 | ||
2349 | spin_unlock_irqrestore(&vptr->lock, flags); | |
2350 | ||
2351 | velocity_free_rings(tmp_vptr); | |
2352 | ||
2353 | out_free_tmp_vptr_1: | |
2354 | kfree(tmp_vptr); | |
2355 | } | |
2356 | out_0: | |
2357 | return ret; | |
2358 | } | |
2359 | ||
2360 | /** | |
2361 | * velocity_mii_ioctl - MII ioctl handler | |
2362 | * @dev: network device | |
2363 | * @ifr: the ifreq block for the ioctl | |
2364 | * @cmd: the command | |
2365 | * | |
2366 | * Process MII requests made via ioctl from the network layer. These | |
2367 | * are used by tools like kudzu to interrogate the link state of the | |
2368 | * hardware | |
2369 | */ | |
2370 | static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |
2371 | { | |
2372 | struct velocity_info *vptr = netdev_priv(dev); | |
2373 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
2374 | unsigned long flags; | |
2375 | struct mii_ioctl_data *miidata = if_mii(ifr); | |
2376 | int err; | |
2377 | ||
2378 | switch (cmd) { | |
2379 | case SIOCGMIIPHY: | |
2380 | miidata->phy_id = readb(®s->MIIADR) & 0x1f; | |
2381 | break; | |
2382 | case SIOCGMIIREG: | |
2383 | if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0) | |
2384 | return -ETIMEDOUT; | |
2385 | break; | |
2386 | case SIOCSMIIREG: | |
2387 | spin_lock_irqsave(&vptr->lock, flags); | |
2388 | err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in); | |
2389 | spin_unlock_irqrestore(&vptr->lock, flags); | |
2390 | check_connection_type(vptr->mac_regs); | |
2391 | if (err) | |
2392 | return err; | |
2393 | break; | |
2394 | default: | |
2395 | return -EOPNOTSUPP; | |
2396 | } | |
2397 | return 0; | |
2398 | } | |
2399 | ||
2400 | ||
2401 | /** | |
2402 | * velocity_ioctl - ioctl entry point | |
2403 | * @dev: network device | |
2404 | * @rq: interface request ioctl | |
2405 | * @cmd: command code | |
2406 | * | |
2407 | * Called when the user issues an ioctl request to the network | |
2408 | * device in question. The velocity interface supports MII. | |
2409 | */ | |
2410 | static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
2411 | { | |
2412 | struct velocity_info *vptr = netdev_priv(dev); | |
2413 | int ret; | |
2414 | ||
2415 | /* If we are asked for information and the device is power | |
2416 | saving then we need to bring the device back up to talk to it */ | |
2417 | ||
2418 | if (!netif_running(dev)) | |
2419 | pci_set_power_state(vptr->pdev, PCI_D0); | |
2420 | ||
2421 | switch (cmd) { | |
2422 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ | |
2423 | case SIOCGMIIREG: /* Read MII PHY register. */ | |
2424 | case SIOCSMIIREG: /* Write to MII PHY register. */ | |
2425 | ret = velocity_mii_ioctl(dev, rq, cmd); | |
2426 | break; | |
2427 | ||
2428 | default: | |
2429 | ret = -EOPNOTSUPP; | |
2430 | } | |
2431 | if (!netif_running(dev)) | |
2432 | pci_set_power_state(vptr->pdev, PCI_D3hot); | |
2433 | ||
2434 | ||
2435 | return ret; | |
2436 | } | |
2437 | ||
2438 | /** | |
2439 | * velocity_get_stats - statistics callback | |
2440 | * @dev: network device | |
2441 | * | |
2442 | * Callback from the network layer to allow driver statistics | |
2443 | * to be resynchronized with hardware collected state. In the | |
2444 | * case of the velocity we need to pull the MIB counters from | |
2445 | * the hardware into the counters before letting the network | |
2446 | * layer display them. | |
2447 | */ | |
2448 | static struct net_device_stats *velocity_get_stats(struct net_device *dev) | |
2449 | { | |
2450 | struct velocity_info *vptr = netdev_priv(dev); | |
2451 | ||
2452 | /* If the hardware is down, don't touch MII */ | |
2453 | if (!netif_running(dev)) | |
2454 | return &dev->stats; | |
2455 | ||
2456 | spin_lock_irq(&vptr->lock); | |
2457 | velocity_update_hw_mibs(vptr); | |
2458 | spin_unlock_irq(&vptr->lock); | |
2459 | ||
2460 | dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts]; | |
2461 | dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts]; | |
2462 | dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors]; | |
2463 | ||
2464 | // unsigned long rx_dropped; /* no space in linux buffers */ | |
2465 | dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions]; | |
2466 | /* detailed rx_errors: */ | |
2467 | // unsigned long rx_length_errors; | |
2468 | // unsigned long rx_over_errors; /* receiver ring buff overflow */ | |
2469 | dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE]; | |
2470 | // unsigned long rx_frame_errors; /* recv'd frame alignment error */ | |
2471 | // unsigned long rx_fifo_errors; /* recv'r fifo overrun */ | |
2472 | // unsigned long rx_missed_errors; /* receiver missed packet */ | |
2473 | ||
2474 | /* detailed tx_errors */ | |
2475 | // unsigned long tx_fifo_errors; | |
2476 | ||
2477 | return &dev->stats; | |
2478 | } | |
2479 | ||
2480 | /** | |
2481 | * velocity_close - close adapter callback | |
2482 | * @dev: network device | |
2483 | * | |
2484 | * Callback from the network layer when the velocity is being | |
2485 | * deactivated by the network layer | |
2486 | */ | |
2487 | static int velocity_close(struct net_device *dev) | |
2488 | { | |
2489 | struct velocity_info *vptr = netdev_priv(dev); | |
2490 | ||
2491 | napi_disable(&vptr->napi); | |
2492 | netif_stop_queue(dev); | |
2493 | velocity_shutdown(vptr); | |
2494 | ||
2495 | if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) | |
2496 | velocity_get_ip(vptr); | |
2497 | if (dev->irq != 0) | |
2498 | free_irq(dev->irq, dev); | |
2499 | ||
2500 | /* Power down the chip */ | |
2501 | pci_set_power_state(vptr->pdev, PCI_D3hot); | |
2502 | ||
2503 | velocity_free_rings(vptr); | |
2504 | ||
2505 | vptr->flags &= (~VELOCITY_FLAGS_OPENED); | |
2506 | return 0; | |
2507 | } | |
2508 | ||
2509 | /** | |
2510 | * velocity_xmit - transmit packet callback | |
2511 | * @skb: buffer to transmit | |
2512 | * @dev: network device | |
2513 | * | |
2514 | * Called by the network layer to request that a packet be queued to | |
2515 | * the velocity. Returns zero on success. | |
2516 | */ | |
2517 | static netdev_tx_t velocity_xmit(struct sk_buff *skb, | |
2518 | struct net_device *dev) | |
2519 | { | |
2520 | struct velocity_info *vptr = netdev_priv(dev); | |
2521 | int qnum = 0; | |
2522 | struct tx_desc *td_ptr; | |
2523 | struct velocity_td_info *tdinfo; | |
2524 | unsigned long flags; | |
2525 | int pktlen; | |
2526 | int index, prev; | |
2527 | int i = 0; | |
2528 | ||
2529 | if (skb_padto(skb, ETH_ZLEN)) | |
2530 | goto out; | |
2531 | ||
2532 | /* The hardware can handle at most 7 memory segments, so merge | |
2533 | * the skb if there are more */ | |
2534 | if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { | |
2535 | kfree_skb(skb); | |
2536 | return NETDEV_TX_OK; | |
2537 | } | |
2538 | ||
2539 | pktlen = skb_shinfo(skb)->nr_frags == 0 ? | |
2540 | max_t(unsigned int, skb->len, ETH_ZLEN) : | |
2541 | skb_headlen(skb); | |
2542 | ||
2543 | spin_lock_irqsave(&vptr->lock, flags); | |
2544 | ||
2545 | index = vptr->tx.curr[qnum]; | |
2546 | td_ptr = &(vptr->tx.rings[qnum][index]); | |
2547 | tdinfo = &(vptr->tx.infos[qnum][index]); | |
2548 | ||
2549 | td_ptr->tdesc1.TCR = TCR0_TIC; | |
2550 | td_ptr->td_buf[0].size &= ~TD_QUEUE; | |
2551 | ||
2552 | /* | |
2553 | * Map the linear network buffer into PCI space and | |
2554 | * add it to the transmit ring. | |
2555 | */ | |
2556 | tdinfo->skb = skb; | |
2557 | tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); | |
2558 | td_ptr->tdesc0.len = cpu_to_le16(pktlen); | |
2559 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); | |
2560 | td_ptr->td_buf[0].pa_high = 0; | |
2561 | td_ptr->td_buf[0].size = cpu_to_le16(pktlen); | |
2562 | ||
2563 | /* Handle fragments */ | |
2564 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
2565 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
2566 | ||
2567 | tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page, | |
2568 | frag->page_offset, frag->size, | |
2569 | PCI_DMA_TODEVICE); | |
2570 | ||
2571 | td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); | |
2572 | td_ptr->td_buf[i + 1].pa_high = 0; | |
2573 | td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size); | |
2574 | } | |
2575 | tdinfo->nskb_dma = i + 1; | |
2576 | ||
2577 | td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16; | |
2578 | ||
2579 | if (vptr->vlgrp && vlan_tx_tag_present(skb)) { | |
2580 | td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb)); | |
2581 | td_ptr->tdesc1.TCR |= TCR0_VETAG; | |
2582 | } | |
2583 | ||
2584 | /* | |
2585 | * Handle hardware checksum | |
2586 | */ | |
2587 | if ((dev->features & NETIF_F_IP_CSUM) && | |
2588 | (skb->ip_summed == CHECKSUM_PARTIAL)) { | |
2589 | const struct iphdr *ip = ip_hdr(skb); | |
2590 | if (ip->protocol == IPPROTO_TCP) | |
2591 | td_ptr->tdesc1.TCR |= TCR0_TCPCK; | |
2592 | else if (ip->protocol == IPPROTO_UDP) | |
2593 | td_ptr->tdesc1.TCR |= (TCR0_UDPCK); | |
2594 | td_ptr->tdesc1.TCR |= TCR0_IPCK; | |
2595 | } | |
2596 | ||
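| /* | |
| * Hand the descriptor to the hardware: mark it owned by the NIC | |
| * and only then set TD_QUEUE on the previous descriptor, which | |
| * avoids the chip chaining onto an entry that is still being | |
| * assembled. | |
| */ | |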
2597 | prev = index - 1; | |
2598 | if (prev < 0) | |
2599 | prev = vptr->options.numtx - 1; | |
2600 | td_ptr->tdesc0.len |= OWNED_BY_NIC; | |
2601 | vptr->tx.used[qnum]++; | |
2602 | vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx; | |
2603 | ||
2604 | if (AVAIL_TD(vptr, qnum) < 1) | |
2605 | netif_stop_queue(dev); | |
2606 | ||
2607 | td_ptr = &(vptr->tx.rings[qnum][prev]); | |
2608 | td_ptr->td_buf[0].size |= TD_QUEUE; | |
2609 | mac_tx_queue_wake(vptr->mac_regs, qnum); | |
2610 | ||
2611 | dev->trans_start = jiffies; | |
2612 | spin_unlock_irqrestore(&vptr->lock, flags); | |
2613 | out: | |
2614 | return NETDEV_TX_OK; | |
2615 | } | |
2616 | ||
2617 | ||
2618 | static const struct net_device_ops velocity_netdev_ops = { | |
2619 | .ndo_open = velocity_open, | |
2620 | .ndo_stop = velocity_close, | |
2621 | .ndo_start_xmit = velocity_xmit, | |
2622 | .ndo_get_stats = velocity_get_stats, | |
2623 | .ndo_validate_addr = eth_validate_addr, | |
2624 | .ndo_set_mac_address = eth_mac_addr, | |
2625 | .ndo_set_multicast_list = velocity_set_multi, | |
2626 | .ndo_change_mtu = velocity_change_mtu, | |
2627 | .ndo_do_ioctl = velocity_ioctl, | |
2628 | .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid, | |
2629 | .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid, | |
2630 | .ndo_vlan_rx_register = velocity_vlan_rx_register, | |
2631 | }; | |
2632 | ||
2633 | /** | |
2634 | * velocity_init_info - init private data | |
2635 | * @pdev: PCI device | |
2636 | * @vptr: Velocity info | |
2637 | * @info: Board type | |
2638 | * | |
2639 | * Set up the initial velocity_info struct for the device that has been | |
2640 | * discovered. | |
2641 | */ | |
2642 | static void __devinit velocity_init_info(struct pci_dev *pdev, | |
2643 | struct velocity_info *vptr, | |
2644 | const struct velocity_info_tbl *info) | |
2645 | { | |
2646 | memset(vptr, 0, sizeof(struct velocity_info)); | |
2647 | ||
2648 | vptr->pdev = pdev; | |
2649 | vptr->chip_id = info->chip_id; | |
2650 | vptr->tx.numq = info->txqueue; | |
2651 | vptr->multicast_limit = MCAM_SIZE; | |
2652 | spin_lock_init(&vptr->lock); | |
2653 | } | |
2654 | ||
2655 | /** | |
2656 | * velocity_get_pci_info - retrieve PCI info for device | |
2657 | * @vptr: velocity device | |
2658 | * @pdev: PCI device it matches | |
2659 | * | |
2660 | * Retrieve the PCI configuration space data that interests us from | |
2661 | * the kernel PCI layer | |
2662 | */ | |
2663 | static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev) | |
2664 | { | |
2665 | vptr->rev_id = pdev->revision; | |
2666 | ||
2667 | pci_set_master(pdev); | |
2668 | ||
2669 | vptr->ioaddr = pci_resource_start(pdev, 0); | |
2670 | vptr->memaddr = pci_resource_start(pdev, 1); | |
2671 | ||
2672 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) { | |
2673 | dev_err(&pdev->dev, | |
2674 | "region #0 is not an I/O resource, aborting.\n"); | |
2675 | return -EINVAL; | |
2676 | } | |
2677 | ||
2678 | if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) { | |
2679 | dev_err(&pdev->dev, | |
2680 | "region #1 is an I/O resource, aborting.\n"); | |
2681 | return -EINVAL; | |
2682 | } | |
2683 | ||
2684 | if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) { | |
2685 | dev_err(&pdev->dev, "region #1 is too small.\n"); | |
2686 | return -EINVAL; | |
2687 | } | |
2688 | vptr->pdev = pdev; | |
2689 | ||
2690 | return 0; | |
2691 | } | |
2692 | ||
2693 | /** | |
2694 | * velocity_print_info - per driver data | |
2695 | * @vptr: velocity | |
2696 | * | |
2697 | * Print per driver data as the kernel driver finds Velocity | |
2698 | * hardware | |
2699 | */ | |
2700 | static void __devinit velocity_print_info(struct velocity_info *vptr) | |
2701 | { | |
2702 | struct net_device *dev = vptr->dev; | |
2703 | ||
2704 | printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); | |
2705 | printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", | |
2706 | dev->name, | |
2707 | dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], | |
2708 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); | |
2709 | } | |
2710 | ||
2711 | static u32 velocity_get_link(struct net_device *dev) | |
2712 | { | |
2713 | struct velocity_info *vptr = netdev_priv(dev); | |
2714 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
2715 | return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 1 : 0; | |
2716 | } | |
2717 | ||
2718 | ||
2719 | /** | |
2720 | * velocity_found1 - set up discovered velocity card | |
2721 | * @pdev: PCI device | |
2722 | * @ent: PCI device table entry that matched | |
2723 | * | |
2724 | * Configure a discovered adapter from scratch. Return a negative | |
2725 | * errno error code on failure paths. | |
2726 | */ | |
2727 | static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent) | |
2728 | { | |
2729 | static int first = 1; | |
2730 | struct net_device *dev; | |
2731 | int i; | |
2732 | const char *drv_string; | |
2733 | const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data]; | |
2734 | struct velocity_info *vptr; | |
2735 | struct mac_regs __iomem *regs; | |
2736 | int ret = -ENOMEM; | |
2737 | ||
2738 | /* FIXME: this driver, like almost all other ethernet drivers, | |
2739 | * can support more than MAX_UNITS. | |
2740 | */ | |
2741 | if (velocity_nics >= MAX_UNITS) { | |
2742 | dev_notice(&pdev->dev, "already found %d NICs.\n", | |
2743 | velocity_nics); | |
2744 | return -ENODEV; | |
2745 | } | |
2746 | ||
2747 | dev = alloc_etherdev(sizeof(struct velocity_info)); | |
2748 | if (!dev) { | |
2749 | dev_err(&pdev->dev, "allocate net device failed.\n"); | |
2750 | goto out; | |
2751 | } | |
2752 | ||
2753 | /* Chain it all together */ | |
2754 | ||
2755 | SET_NETDEV_DEV(dev, &pdev->dev); | |
2756 | vptr = netdev_priv(dev); | |
2757 | ||
2758 | ||
2759 | if (first) { | |
2760 | printk(KERN_INFO "%s Ver. %s\n", | |
2761 | VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION); | |
2762 | printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n"); | |
2763 | printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n"); | |
2764 | first = 0; | |
2765 | } | |
2766 | ||
2767 | velocity_init_info(pdev, vptr, info); | |
2768 | ||
2769 | vptr->dev = dev; | |
2770 | ||
2771 | dev->irq = pdev->irq; | |
2772 | ||
2773 | ret = pci_enable_device(pdev); | |
2774 | if (ret < 0) | |
2775 | goto err_free_dev; | |
2776 | ||
2777 | ret = velocity_get_pci_info(vptr, pdev); | |
2778 | if (ret < 0) { | |
2779 | /* error message already printed */ | |
2780 | goto err_disable; | |
2781 | } | |
2782 | ||
2783 | ret = pci_request_regions(pdev, VELOCITY_NAME); | |
2784 | if (ret < 0) { | |
2785 | dev_err(&pdev->dev, "No PCI resources.\n"); | |
2786 | goto err_disable; | |
2787 | } | |
2788 | ||
2789 | regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); | |
2790 | if (regs == NULL) { | |
2791 | ret = -EIO; | |
2792 | goto err_release_res; | |
2793 | } | |
2794 | ||
2795 | vptr->mac_regs = regs; | |
2796 | ||
2797 | mac_wol_reset(regs); | |
2798 | ||
2799 | dev->base_addr = vptr->ioaddr; | |
2800 | ||
2801 | for (i = 0; i < 6; i++) | |
2802 | dev->dev_addr[i] = readb(®s->PAR[i]); | |
2803 | ||
2804 | ||
2805 | drv_string = dev_driver_string(&pdev->dev); | |
2806 | ||
2807 | velocity_get_options(&vptr->options, velocity_nics, drv_string); | |
2808 | ||
2809 | /* | |
2810 | * Mask out the options that cannot be set on the chip | |
2811 | */ | |
2812 | ||
2813 | vptr->options.flags &= info->flags; | |
2814 | ||
2815 | /* | |
2816 | * Enable the chip specified capabilities | |
2817 | */ | |
2818 | ||
2819 | vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL); | |
2820 | ||
2821 | vptr->wol_opts = vptr->options.wol_opts; | |
2822 | vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; | |
2823 | ||
2824 | vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); | |
2825 | ||
2826 | dev->irq = pdev->irq; | |
2827 | dev->netdev_ops = &velocity_netdev_ops; | |
2828 | dev->ethtool_ops = &velocity_ethtool_ops; | |
2829 | netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); | |
2830 | ||
2831 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | | |
2832 | NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; | |
2833 | ||
2834 | ret = register_netdev(dev); | |
2835 | if (ret < 0) | |
2836 | goto err_iounmap; | |
2837 | ||
2838 | if (!velocity_get_link(dev)) { | |
2839 | netif_carrier_off(dev); | |
2840 | vptr->mii_status |= VELOCITY_LINK_FAIL; | |
2841 | } | |
2842 | ||
2843 | velocity_print_info(vptr); | |
2844 | pci_set_drvdata(pdev, dev); | |
2845 | ||
2846 | /* and leave the chip powered down */ | |
2847 | ||
2848 | pci_set_power_state(pdev, PCI_D3hot); | |
2849 | velocity_nics++; | |
2850 | out: | |
2851 | return ret; | |
2852 | ||
2853 | err_iounmap: | |
2854 | iounmap(regs); | |
2855 | err_release_res: | |
2856 | pci_release_regions(pdev); | |
2857 | err_disable: | |
2858 | pci_disable_device(pdev); | |
2859 | err_free_dev: | |
2860 | free_netdev(dev); | |
2861 | goto out; | |
2862 | } | |
2863 | ||
2864 | ||
2865 | #ifdef CONFIG_PM | |
2866 | /** | |
2867 | * wol_calc_crc - WOL CRC | |
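| * @size: number of 8 byte groups in the pattern (one mask byte per group) | |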
2868 | * @pattern: data pattern | |
2869 | * @mask_pattern: mask | |
2870 | * | |
2871 | * Compute the wake on lan crc hashes for the packet header | |
2872 | * we are interested in. | |
2873 | */ | |
2874 | static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern) | |
2875 | { | |
2876 | u16 crc = 0xFFFF; | |
2877 | u8 mask; | |
2878 | int i, j; | |
2879 | ||
2880 | for (i = 0; i < size; i++) { | |
2881 | mask = mask_pattern[i]; | |
2882 | ||
2883 | /* Skip this byte if its mask is zero */ | |
2884 | if (mask == 0x00) | |
2885 | continue; | |
2886 | ||
2887 | for (j = 0; j < 8; j++) { | |
2888 | if ((mask & 0x01) == 0) { | |
2889 | mask >>= 1; | |
2890 | continue; | |
2891 | } | |
2892 | mask >>= 1; | |
2893 | crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1); | |
2894 | } | |
2895 | } | |
2896 | /* Finally, invert the result once to get the correct data */ | |
2897 | crc = ~crc; | |
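| /* | |
| * crc fits in 16 bits, so bitrev32() followed by a shift right of | |
| * 16 is simply the 16 bit CRC with its bit order reversed. | |
| */ | |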
2898 | return bitrev32(crc) >> 16; | |
2899 | } | |
2900 | ||
2901 | /** | |
2902 | * velocity_set_wol - set up for wake on lan | |
2903 | * @vptr: velocity to set WOL status on | |
2904 | * | |
2905 | * Set a card up for wake on lan either by unicast or by | |
2906 | * ARP packet. | |
2907 | * | |
2908 | * FIXME: check static buffer is safe here | |
2909 | */ | |
2910 | static int velocity_set_wol(struct velocity_info *vptr) | |
2911 | { | |
2912 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
2913 | static u8 buf[256]; | |
2914 | int i; | |
2915 | ||
2916 | static u32 mask_pattern[2][4] = { | |
2917 | {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */ | |
2918 | {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */ | |
2919 | }; | |
2920 | ||
2921 | writew(0xFFFF, ®s->WOLCRClr); | |
2922 | writeb(WOLCFG_SAB | WOLCFG_SAM, ®s->WOLCFGSet); | |
2923 | writew(WOLCR_MAGIC_EN, ®s->WOLCRSet); | |
2924 | ||
2925 | /* | |
2926 | if (vptr->wol_opts & VELOCITY_WOL_PHY) | |
2927 | writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), ®s->WOLCRSet); | |
2928 | */ | |
2929 | ||
2930 | if (vptr->wol_opts & VELOCITY_WOL_UCAST) | |
2931 | writew(WOLCR_UNICAST_EN, ®s->WOLCRSet); | |
2932 | ||
2933 | if (vptr->wol_opts & VELOCITY_WOL_ARP) { | |
2934 | struct arp_packet *arp = (struct arp_packet *) buf; | |
2935 | u16 crc; | |
2936 | memset(buf, 0, sizeof(struct arp_packet) + 7); | |
2937 | ||
2938 | for (i = 0; i < 4; i++) | |
2939 | writel(mask_pattern[0][i], ®s->ByteMask[0][i]); | |
2940 | ||
2941 | arp->type = htons(ETH_P_ARP); | |
2942 | arp->ar_op = htons(1); | |
2943 | ||
2944 | memcpy(arp->ar_tip, vptr->ip_addr, 4); | |
2945 | ||
2946 | crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf, | |
2947 | (u8 *) & mask_pattern[0][0]); | |
2948 | ||
2949 | writew(crc, ®s->PatternCRC[0]); | |
2950 | writew(WOLCR_ARP_EN, ®s->WOLCRSet); | |
2951 | } | |
2952 | ||
2953 | BYTE_REG_BITS_ON(PWCFG_WOLTYPE, ®s->PWCFGSet); | |
2954 | BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, ®s->PWCFGSet); | |
2955 | ||
2956 | writew(0x0FFF, ®s->WOLSRClr); | |
2957 | ||
2958 | if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { | |
2959 | if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) | |
2960 | MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); | |
2961 | ||
2962 | MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); | |
2963 | } | |
2964 | ||
2965 | if (vptr->mii_status & VELOCITY_SPEED_1000) | |
2966 | MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); | |
2967 | ||
2968 | BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); | |
2969 | ||
2970 | { | |
2971 | u8 GCR; | |
2972 | GCR = readb(®s->CHIPGCR); | |
2973 | GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX; | |
2974 | writeb(GCR, ®s->CHIPGCR); | |
2975 | } | |
2976 | ||
2977 | BYTE_REG_BITS_OFF(ISR_PWEI, ®s->ISR); | |
2978 | /* Turn on SWPTAG just before entering power mode */ | |
2979 | BYTE_REG_BITS_ON(STICKHW_SWPTAG, ®s->STICKHW); | |
2980 | /* Go to bed ..... */ | |
2981 | BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW); | |
2982 | ||
2983 | return 0; | |
2984 | } | |
2985 | ||
2986 | /** | |
2987 | * velocity_save_context - save registers | |
2988 | * @vptr: velocity | |
2989 | * @context: buffer for stored context | |
2990 | * | |
2991 | * Retrieve the current configuration from the velocity hardware | |
2992 | * and stash it in the context structure, for use by the context | |
2993 | * restore functions. This allows us to save things we need across | |
2994 | * power down states | |
2995 | */ | |
2996 | static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context) | |
2997 | { | |
2998 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
2999 | u16 i; | |
3000 | u8 __iomem *ptr = (u8 __iomem *)regs; | |
3001 | ||
3002 | for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4) | |
3003 | *((u32 *) (context->mac_reg + i)) = readl(ptr + i); | |
3004 | ||
3005 | for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4) | |
3006 | *((u32 *) (context->mac_reg + i)) = readl(ptr + i); | |
3007 | ||
3008 | for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) | |
3009 | *((u32 *) (context->mac_reg + i)) = readl(ptr + i); | |
3010 | ||
3011 | } | |
3012 | ||
3013 | static int velocity_suspend(struct pci_dev *pdev, pm_message_t state) | |
3014 | { | |
3015 | struct net_device *dev = pci_get_drvdata(pdev); | |
3016 | struct velocity_info *vptr = netdev_priv(dev); | |
3017 | unsigned long flags; | |
3018 | ||
3019 | if (!netif_running(vptr->dev)) | |
3020 | return 0; | |
3021 | ||
3022 | netif_device_detach(vptr->dev); | |
3023 | ||
3024 | spin_lock_irqsave(&vptr->lock, flags); | |
3025 | pci_save_state(pdev); | |
3026 | #ifdef ETHTOOL_GWOL | |
3027 | if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { | |
3028 | velocity_get_ip(vptr); | |
3029 | velocity_save_context(vptr, &vptr->context); | |
3030 | velocity_shutdown(vptr); | |
3031 | velocity_set_wol(vptr); | |
3032 | pci_enable_wake(pdev, PCI_D3hot, 1); | |
3033 | pci_set_power_state(pdev, PCI_D3hot); | |
3034 | } else { | |
3035 | velocity_save_context(vptr, &vptr->context); | |
3036 | velocity_shutdown(vptr); | |
3037 | pci_disable_device(pdev); | |
3038 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
3039 | } | |
3040 | #else | |
3041 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
3042 | #endif | |
3043 | spin_unlock_irqrestore(&vptr->lock, flags); | |
3044 | return 0; | |
3045 | } | |
3046 | ||
3047 | /** | |
3048 | * velocity_restore_context - restore registers | |
3049 | * @vptr: velocity | |
3050 | * @context: buffer for stored context | |
3051 | * | |
3052 | * Reload the register configuration from the velocity context | |
3053 | * created by velocity_save_context. | |
3054 | */ | |
3055 | static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context) | |
3056 | { | |
3057 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
3058 | int i; | |
3059 | u8 __iomem *ptr = (u8 __iomem *)regs; | |
3060 | ||
3061 | for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4) | |
3062 | writel(*((u32 *) (context->mac_reg + i)), ptr + i); | |
3063 | ||
3064 | /* Just skip cr0 */ | |
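| /* | |
| * CR1-CR3 are set/clear register pairs four bytes apart: writing | |
| * the complement of the saved byte to the clear register and the | |
| * byte itself to the set register restores the exact bit pattern. | |
| */ | |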
3065 | for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) { | |
3066 | /* Clear */ | |
3067 | writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4); | |
3068 | /* Set */ | |
3069 | writeb(*((u8 *) (context->mac_reg + i)), ptr + i); | |
3070 | } | |
3071 | ||
3072 | for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4) | |
3073 | writel(*((u32 *) (context->mac_reg + i)), ptr + i); | |
3074 | ||
3075 | for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) | |
3076 | writel(*((u32 *) (context->mac_reg + i)), ptr + i); | |
3077 | ||
3078 | for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++) | |
3079 | writeb(*((u8 *) (context->mac_reg + i)), ptr + i); | |
3080 | } | |
3081 | ||
3082 | static int velocity_resume(struct pci_dev *pdev) | |
3083 | { | |
3084 | struct net_device *dev = pci_get_drvdata(pdev); | |
3085 | struct velocity_info *vptr = netdev_priv(dev); | |
3086 | unsigned long flags; | |
3087 | int i; | |
3088 | ||
3089 | if (!netif_running(vptr->dev)) | |
3090 | return 0; | |
3091 | ||
3092 | pci_set_power_state(pdev, PCI_D0); | |
3093 | pci_enable_wake(pdev, 0, 0); | |
3094 | pci_restore_state(pdev); | |
3095 | ||
3096 | mac_wol_reset(vptr->mac_regs); | |
3097 | ||
3098 | spin_lock_irqsave(&vptr->lock, flags); | |
3099 | velocity_restore_context(vptr, &vptr->context); | |
3100 | velocity_init_registers(vptr, VELOCITY_INIT_WOL); | |
3101 | mac_disable_int(vptr->mac_regs); | |
3102 | ||
3103 | velocity_tx_srv(vptr, 0); | |
3104 | ||
3105 | for (i = 0; i < vptr->tx.numq; i++) { | |
3106 | if (vptr->tx.used[i]) | |
3107 | mac_tx_queue_wake(vptr->mac_regs, i); | |
3108 | } | |
3109 | ||
3110 | mac_enable_int(vptr->mac_regs); | |
3111 | spin_unlock_irqrestore(&vptr->lock, flags); | |
3112 | netif_device_attach(vptr->dev); | |
3113 | ||
3114 | return 0; | |
3115 | } | |
3116 | #endif | |
3117 | ||
3118 | /* | |
3119 | * Definition for our device driver. The PCI layer interface | |
3120 | * uses this to handle all our card discovery and plugging | |
3121 | */ | |
3122 | static struct pci_driver velocity_driver = { | |
3123 | .name = VELOCITY_NAME, | |
3124 | .id_table = velocity_id_table, | |
3125 | .probe = velocity_found1, | |
3126 | .remove = __devexit_p(velocity_remove1), | |
3127 | #ifdef CONFIG_PM | |
3128 | .suspend = velocity_suspend, | |
3129 | .resume = velocity_resume, | |
3130 | #endif | |
3131 | }; | |
3132 | ||
3133 | ||
3134 | /** | |
3135 | * velocity_ethtool_up - pre hook for ethtool | |
3136 | * @dev: network device | |
3137 | * | |
3138 | * Called before an ethtool operation. We need to make sure the | |
3139 | * chip is out of D3 state before we poke at it. | |
3140 | */ | |
3141 | static int velocity_ethtool_up(struct net_device *dev) | |
3142 | { | |
3143 | struct velocity_info *vptr = netdev_priv(dev); | |
3144 | if (!netif_running(dev)) | |
3145 | pci_set_power_state(vptr->pdev, PCI_D0); | |
3146 | return 0; | |
3147 | } | |
3148 | ||
3149 | /** | |
3150 | * velocity_ethtool_down - post hook for ethtool | |
3151 | * @dev: network device | |
3152 | * | |
3153 | * Called after an ethtool operation. Restore the chip back to D3 | |
3154 | * state if it isn't running. | |
3155 | */ | |
3156 | static void velocity_ethtool_down(struct net_device *dev) | |
3157 | { | |
3158 | struct velocity_info *vptr = netdev_priv(dev); | |
3159 | if (!netif_running(dev)) | |
3160 | pci_set_power_state(vptr->pdev, PCI_D3hot); | |
3161 | } | |
3162 | ||
3163 | static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
3164 | { | |
3165 | struct velocity_info *vptr = netdev_priv(dev); | |
3166 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
3167 | u32 status; | |
3168 | status = check_connection_type(vptr->mac_regs); | |
3169 | ||
3170 | cmd->supported = SUPPORTED_TP | | |
3171 | SUPPORTED_Autoneg | | |
3172 | SUPPORTED_10baseT_Half | | |
3173 | SUPPORTED_10baseT_Full | | |
3174 | SUPPORTED_100baseT_Half | | |
3175 | SUPPORTED_100baseT_Full | | |
3176 | SUPPORTED_1000baseT_Half | | |
3177 | SUPPORTED_1000baseT_Full; | |
3178 | if (status & VELOCITY_SPEED_1000) | |
3179 | cmd->speed = SPEED_1000; | |
3180 | else if (status & VELOCITY_SPEED_100) | |
3181 | cmd->speed = SPEED_100; | |
3182 | else | |
3183 | cmd->speed = SPEED_10; | |
3184 | cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE; | |
3185 | cmd->port = PORT_TP; | |
3186 | cmd->transceiver = XCVR_INTERNAL; | |
3187 | cmd->phy_address = readb(®s->MIIADR) & 0x1F; | |
3188 | ||
3189 | if (status & VELOCITY_DUPLEX_FULL) | |
3190 | cmd->duplex = DUPLEX_FULL; | |
3191 | else | |
3192 | cmd->duplex = DUPLEX_HALF; | |
3193 | ||
3194 | return 0; | |
3195 | } | |
3196 | ||
3197 | static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
3198 | { | |
3199 | struct velocity_info *vptr = netdev_priv(dev); | |
3200 | u32 curr_status; | |
3201 | u32 new_status = 0; | |
3202 | int ret = 0; | |
3203 | ||
3204 | curr_status = check_connection_type(vptr->mac_regs); | |
3205 | curr_status &= (~VELOCITY_LINK_FAIL); | |
3206 | ||
3207 | new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); | |
3208 | new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0); | |
3209 | new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0); | |
3210 | new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0); | |
3211 | ||
3212 | if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) | |
3213 | ret = -EINVAL; | |
3214 | else | |
3215 | velocity_set_media_mode(vptr, new_status); | |
3216 | ||
3217 | return ret; | |
3218 | } | |
3219 | ||
3220 | static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | |
3221 | { | |
3222 | struct velocity_info *vptr = netdev_priv(dev); | |
3223 | strcpy(info->driver, VELOCITY_NAME); | |
3224 | strcpy(info->version, VELOCITY_VERSION); | |
3225 | strcpy(info->bus_info, pci_name(vptr->pdev)); | |
3226 | } | |
3227 | ||
static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct velocity_info *vptr = netdev_priv(dev);
	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
	wol->wolopts |= WAKE_MAGIC;
	/*
	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
		   wol.wolopts|=WAKE_PHY;
	*/
	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
		wol->wolopts |= WAKE_UCAST;
	if (vptr->wol_opts & VELOCITY_WOL_ARP)
		wol->wolopts |= WAKE_ARP;
	memcpy(&wol->sopass, vptr->wol_passwd, 6);
}

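/**
 * velocity_ethtool_set_wol - configure Wake-on-LAN
 * @dev: network device
 * @wol: requested wake settings from ethtool
 *
 * Enable the requested wake events (magic packet, unicast, ARP) and
 * store the SecureOn password, e.g. "ethtool -s ethX wol g" requests
 * magic packet wake. Requests containing none of the supported wake
 * types are rejected.
 */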
static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct velocity_info *vptr = netdev_priv(dev);

	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
		return -EINVAL;
	vptr->wol_opts = VELOCITY_WOL_MAGIC;

	/*
	   if (wol.wolopts & WAKE_PHY) {
		vptr->wol_opts|=VELOCITY_WOL_PHY;
		vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
	   }
	*/

	if (wol->wolopts & WAKE_MAGIC) {
		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	if (wol->wolopts & WAKE_UCAST) {
		vptr->wol_opts |= VELOCITY_WOL_UCAST;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	if (wol->wolopts & WAKE_ARP) {
		vptr->wol_opts |= VELOCITY_WOL_ARP;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	memcpy(vptr->wol_passwd, wol->sopass, 6);
	return 0;
}

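/* The debug message level is kept driver wide rather than per adapter */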
static u32 velocity_get_msglevel(struct net_device *dev)
{
	return msglevel;
}

static void velocity_set_msglevel(struct net_device *dev, u32 value)
{
	msglevel = value;
}

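/**
 * get_pending_timer_val - decode a queue timer value
 * @val: encoded timer register value
 *
 * The Tx/Rx queue timers hold a 6 bit count in the low bits and a 2 bit
 * multiplier (1, 4, 16 or 64) in bits 6-7. Return the delay in
 * microseconds that the encoded value represents; 0x9f, for example,
 * decodes to 31 * 16 = 496us.
 */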
static int get_pending_timer_val(int val)
{
	int mult_bits = val >> 6;
	int mult = 1;

	switch (mult_bits) {
	case 1:
		mult = 4;
		break;
	case 2:
		mult = 16;
		break;
	case 3:
		mult = 64;
		break;
	case 0:
	default:
		break;
	}

	return (val & 0x3f) * mult;
}

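/**
 * set_pending_timer_val - encode a queue timer value
 * @val: timer register value to update
 * @us: requested delay in microseconds
 *
 * Select a multiplier (1, 4, 16 or 64) according to the size of @us and
 * store the scaled 6 bit count plus the multiplier bits in @val. The
 * result is rounded down; 500us, for example, is stored as 31 * 16 = 496us.
 */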
static void set_pending_timer_val(int *val, u32 us)
{
	u8 mult = 0;
	u8 shift = 0;

	if (us >= 0x3f) {
		mult = 1;	/* multiply by 4 */
		shift = 2;
	}
	if (us >= 0x3f * 4) {
		mult = 2;	/* multiply by 16 */
		shift = 4;
	}
	if (us >= 0x3f * 16) {
		mult = 3;	/* multiply by 64 */
		shift = 6;
	}

	*val = (mult << 6) | ((us >> shift) & 0x3f);
}


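/**
 * velocity_get_coalesce - report interrupt coalescing parameters
 * @dev: network device
 * @ecmd: ethtool coalesce block to fill in
 *
 * Report the current interrupt suppression frame counts and the Tx/Rx
 * queue timer values converted back into microseconds.
 */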
static int velocity_get_coalesce(struct net_device *dev,
		struct ethtool_coalesce *ecmd)
{
	struct velocity_info *vptr = netdev_priv(dev);

	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;

	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);

	return 0;
}

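/**
 * velocity_set_coalesce - set interrupt coalescing parameters
 * @dev: network device
 * @ecmd: requested coalescing settings from ethtool
 *
 * Validate and store the requested interrupt suppression frame counts
 * and queue timer delays (e.g. "ethtool -C ethX rx-usecs 60 rx-frames 15"
 * ends up here), then reprogram the adapter with interrupts masked
 * while the new values are loaded.
 */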
static int velocity_set_coalesce(struct net_device *dev,
		struct ethtool_coalesce *ecmd)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int max_us = 0x3f * 64;

	/* The queue timers hold a 6 bit count scaled by at most 64 */
	if (ecmd->tx_coalesce_usecs > max_us)
		return -EINVAL;
	if (ecmd->rx_coalesce_usecs > max_us)
		return -EINVAL;

	if (ecmd->tx_max_coalesced_frames > 0xff)
		return -EINVAL;
	if (ecmd->rx_max_coalesced_frames > 0xff)
		return -EINVAL;

	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;

	set_pending_timer_val(&vptr->options.rxqueue_timer,
			ecmd->rx_coalesce_usecs);
	set_pending_timer_val(&vptr->options.txqueue_timer,
			ecmd->tx_coalesce_usecs);

	/* Setup the interrupt suppression and queue timers */
	mac_disable_int(vptr->mac_regs);
	setup_adaptive_interrupts(vptr);
	setup_queue_timers(vptr);

	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
	mac_clear_isr(vptr->mac_regs);
	mac_enable_int(vptr->mac_regs);

	return 0;
}

static const struct ethtool_ops velocity_ethtool_ops = {
	.get_settings	= velocity_get_settings,
	.set_settings	= velocity_set_settings,
	.get_drvinfo	= velocity_get_drvinfo,
	.set_tx_csum	= ethtool_op_set_tx_csum,
	.get_tx_csum	= ethtool_op_get_tx_csum,
	.get_wol	= velocity_ethtool_get_wol,
	.set_wol	= velocity_ethtool_set_wol,
	.get_msglevel	= velocity_get_msglevel,
	.set_msglevel	= velocity_set_msglevel,
	.set_sg		= ethtool_op_set_sg,
	.get_link	= velocity_get_link,
	.get_coalesce	= velocity_get_coalesce,
	.set_coalesce	= velocity_set_coalesce,
	.begin		= velocity_ethtool_up,
	.complete	= velocity_ethtool_down
};

#ifdef CONFIG_PM
#ifdef CONFIG_INET
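/**
 * velocity_netdev_event - inetaddr notifier callback
 * @nb: notifier block
 * @notification: event being reported
 * @ptr: the in_ifaddr that changed
 *
 * When an IP address is added to or removed from a velocity interface,
 * refresh the driver's cached copy of the interface address (used by
 * the power management / wake-up code).
 */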
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;

	if (dev_net(dev) == &init_net &&
	    dev->netdev_ops == &velocity_netdev_ops)
		velocity_get_ip(netdev_priv(dev));

	return NOTIFY_DONE;
}
#endif	/* CONFIG_INET */
#endif	/* CONFIG_PM */

#if defined(CONFIG_PM) && defined(CONFIG_INET)
static struct notifier_block velocity_inetaddr_notifier = {
	.notifier_call	= velocity_netdev_event,
};

static void velocity_register_notifier(void)
{
	register_inetaddr_notifier(&velocity_inetaddr_notifier);
}

static void velocity_unregister_notifier(void)
{
	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
}

#else

#define velocity_register_notifier()	do {} while (0)
#define velocity_unregister_notifier()	do {} while (0)

#endif	/* defined(CONFIG_PM) && defined(CONFIG_INET) */

/**
 * velocity_init_module - load time function
 *
 * Called when the velocity module is loaded. The inetaddr notifier is
 * registered, then the PCI driver is registered with the PCI layer,
 * which in turn calls the probe function for each velocity adapter
 * installed in the system. If PCI registration fails the notifier is
 * unregistered again.
 */
static int __init velocity_init_module(void)
{
	int ret;

	velocity_register_notifier();
	ret = pci_register_driver(&velocity_driver);
	if (ret < 0)
		velocity_unregister_notifier();
	return ret;
}

/**
 * velocity_cleanup_module - module unload
 *
 * Called when the velocity module is unloaded. It unregisters the
 * inetaddr notifier and the PCI driver interface for this hardware,
 * which in turn cleans up all discovered interfaces before returning.
 */
static void __exit velocity_cleanup_module(void)
{
	velocity_unregister_notifier();
	pci_unregister_driver(&velocity_driver);
}

module_init(velocity_init_module);
module_exit(velocity_cleanup_module);