1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/module.h>
21#include <linux/version.h>
22#include <linux/spinlock.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/ethtool.h>
26#include <linux/if_ether.h>
27#include <linux/if_vlan.h>
28#include <linux/crc32.h>
29#include <linux/cpu.h>
30#include <linux/fs.h>
31#include <linux/sysfs.h>
32#include <linux/ctype.h>
33#include <scsi/scsi_tcq.h>
34#include <scsi/scsicam.h>
35#include <scsi/scsi_transport.h>
36#include <scsi/scsi_transport_fc.h>
37#include <net/rtnetlink.h>
38
39#include <scsi/fc/fc_encaps.h>
40#include <scsi/fc/fc_fip.h>
41
42#include <scsi/libfc.h>
43#include <scsi/fc_frame.h>
44#include <scsi/libfcoe.h>
45
46#include "fcoe.h"
47
48MODULE_AUTHOR("Open-FCoE.org");
49MODULE_DESCRIPTION("FCoE");
50MODULE_LICENSE("GPL v2");
51
52/* fcoe host list */
53LIST_HEAD(fcoe_hostlist);
54DEFINE_RWLOCK(fcoe_hostlist_lock);
55DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
56
57/* Function Prototypes */
58static int fcoe_reset(struct Scsi_Host *shost);
59static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
60static int fcoe_rcv(struct sk_buff *, struct net_device *,
61 struct packet_type *, struct net_device *);
62static int fcoe_percpu_receive_thread(void *arg);
63static void fcoe_clean_pending_queue(struct fc_lport *lp);
64static void fcoe_percpu_clean(struct fc_lport *lp);
65static int fcoe_link_ok(struct fc_lport *lp);
66
67static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
68static int fcoe_hostlist_add(const struct fc_lport *);
69static int fcoe_hostlist_remove(const struct fc_lport *);
70
71static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
72static int fcoe_device_notification(struct notifier_block *, ulong, void *);
73static void fcoe_dev_setup(void);
74static void fcoe_dev_cleanup(void);
75
76/* notification function from net device */
77static struct notifier_block fcoe_notifier = {
78 .notifier_call = fcoe_device_notification,
79};
80
81static struct scsi_transport_template *scsi_transport_fcoe_sw;
82
83struct fc_function_template fcoe_transport_function = {
84 .show_host_node_name = 1,
85 .show_host_port_name = 1,
86 .show_host_supported_classes = 1,
87 .show_host_supported_fc4s = 1,
88 .show_host_active_fc4s = 1,
89 .show_host_maxframe_size = 1,
90
91 .show_host_port_id = 1,
92 .show_host_supported_speeds = 1,
93 .get_host_speed = fc_get_host_speed,
94 .show_host_speed = 1,
95 .show_host_port_type = 1,
96 .get_host_port_state = fc_get_host_port_state,
97 .show_host_port_state = 1,
98 .show_host_symbolic_name = 1,
99
100 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
101 .show_rport_maxframe_size = 1,
102 .show_rport_supported_classes = 1,
103
104 .show_host_fabric_name = 1,
105 .show_starget_node_name = 1,
106 .show_starget_port_name = 1,
107 .show_starget_port_id = 1,
108 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
109 .show_rport_dev_loss_tmo = 1,
110 .get_fc_host_stats = fc_get_host_stats,
111 .issue_fc_host_lip = fcoe_reset,
112
113 .terminate_rport_io = fc_rport_terminate_io,
114};
115
116static struct scsi_host_template fcoe_shost_template = {
117 .module = THIS_MODULE,
118 .name = "FCoE Driver",
119 .proc_name = FCOE_NAME,
120 .queuecommand = fc_queuecommand,
121 .eh_abort_handler = fc_eh_abort,
122 .eh_device_reset_handler = fc_eh_device_reset,
123 .eh_host_reset_handler = fc_eh_host_reset,
124 .slave_alloc = fc_slave_alloc,
125 .change_queue_depth = fc_change_queue_depth,
126 .change_queue_type = fc_change_queue_type,
127 .this_id = -1,
128 .cmd_per_lun = 32,
129 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
130 .use_clustering = ENABLE_CLUSTERING,
131 .sg_tablesize = SG_ALL,
132 .max_sectors = 0xffff,
133};
134
135/**
136 * fcoe_fip_recv - handle a received FIP frame.
137 * @skb: the receive skb
138 * @dev: associated &net_device
139 * @ptype: the &packet_type structure which was used to register this handler.
140 * @orig_dev: original receive &net_device, in case @dev is a bond.
141 *
142 * Returns: 0 for success
143 */
144static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
145 struct packet_type *ptype,
146 struct net_device *orig_dev)
147{
148 struct fcoe_softc *fc;
149
150 fc = container_of(ptype, struct fcoe_softc, fip_packet_type);
151 fcoe_ctlr_recv(&fc->ctlr, skb);
152 return 0;
153}
154
155/**
156 * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
157 * @fip: FCoE controller.
158 * @skb: FIP Packet.
159 */
160static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
161{
162 skb->dev = fcoe_from_ctlr(fip)->real_dev;
163 dev_queue_xmit(skb);
164}
165
166/**
167 * fcoe_update_src_mac() - Update Ethernet MAC filters.
168 * @fip: FCoE controller.
169 * @old: Unicast MAC address to delete if the MAC is non-zero.
170 * @new: Unicast MAC address to add.
171 *
172 * Remove any previously-set unicast MAC filter.
173 * Add secondary FCoE MAC address filter for our OUI.
174 */
175static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
176{
177 struct fcoe_softc *fc;
178
179 fc = fcoe_from_ctlr(fip);
180 rtnl_lock();
181 if (!is_zero_ether_addr(old))
182 dev_unicast_delete(fc->real_dev, old);
183 dev_unicast_add(fc->real_dev, new);
184 rtnl_unlock();
185}
186
187/**
188 * fcoe_lport_config() - sets up the fc_lport
189 * @lp: ptr to the fc_lport
190 *
191 * Returns: 0 for success
192 */
193static int fcoe_lport_config(struct fc_lport *lp)
194{
195 lp->link_up = 0;
196 lp->qfull = 0;
197 lp->max_retry_count = 3;
198 lp->max_rport_retry_count = 3;
199 lp->e_d_tov = 2 * 1000; /* FC-FS default */
200 lp->r_a_tov = 2 * 2 * 1000;
201 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
202 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
203
204 fc_lport_init_stats(lp);
205
206 /* lport fc_lport related configuration */
207 fc_lport_config(lp);
208
209 /* offload related configuration */
210 lp->crc_offload = 0;
211 lp->seq_offload = 0;
212 lp->lro_enabled = 0;
213 lp->lro_xid = 0;
214 lp->lso_max = 0;
215
216 return 0;
217}
218
219/**
220 * fcoe_netdev_cleanup() - clean up netdev configurations
221 * @fc: ptr to the fcoe_softc
222 */
223void fcoe_netdev_cleanup(struct fcoe_softc *fc)
224{
225 u8 flogi_maddr[ETH_ALEN];
226
227 /* Don't listen for Ethernet packets anymore */
228 dev_remove_pack(&fc->fcoe_packet_type);
229 dev_remove_pack(&fc->fip_packet_type);
230
231 /* Delete secondary MAC addresses */
232 rtnl_lock();
233 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
234 dev_unicast_delete(fc->real_dev, flogi_maddr);
235 if (!is_zero_ether_addr(fc->ctlr.data_src_addr))
236 dev_unicast_delete(fc->real_dev, fc->ctlr.data_src_addr);
237 if (fc->ctlr.spma)
238 dev_unicast_delete(fc->real_dev, fc->ctlr.ctl_src_addr);
239 dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
240 rtnl_unlock();
241}
242
243/**
244 * fcoe_queue_timer() - fcoe queue timer
245 * @lp: the fc_lport pointer
246 *
247 * Calls fcoe_check_wait_queue on timeout
248 *
249 */
250static void fcoe_queue_timer(ulong lp)
251{
252 fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
253}
254
255/**
256 * fcoe_netdev_config() - Set up netdev for SW FCoE
257 * @lp : ptr to the fc_lport
258 * @netdev : ptr to the associated netdevice struct
259 *
260 * Must be called after fcoe_lport_config() as it will use lport mutex
261 *
262 * Returns : 0 for success
263 */
264static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
265{
266 u32 mfs;
267 u64 wwnn, wwpn;
268 struct fcoe_softc *fc;
269 u8 flogi_maddr[ETH_ALEN];
270 struct netdev_hw_addr *ha;
271
272 /* Setup lport private data to point to fcoe softc */
273 fc = lport_priv(lp);
274 fc->ctlr.lp = lp;
275 fc->real_dev = netdev;
276 fc->phys_dev = netdev;
277
278 /* Require support for get_pauseparam ethtool op. */
279 if (netdev->priv_flags & IFF_802_1Q_VLAN)
280 fc->phys_dev = vlan_dev_real_dev(netdev);
281
282 /* Bonding devices are not supported */
283 if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
284 (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
285 (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
286 return -EOPNOTSUPP;
287 }
288
289 /*
290 * Determine max frame size based on underlying device and optional
291 * user-configured limit. If the MFS is too low, fcoe_link_ok()
292 * will return 0, so do this first.
293 */
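	/*
	 * Note: assuming the standard FC-BB-5 encapsulation, the per-frame
	 * overhead is sizeof(struct fcoe_hdr) (14 bytes) plus
	 * sizeof(struct fcoe_crc_eof) (8 bytes), so e.g. a 2500-byte
	 * "baby jumbo" MTU yields an MFS of 2478 bytes.
	 */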
294 mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
295 sizeof(struct fcoe_crc_eof));
296 if (fc_set_mfs(lp, mfs))
297 return -EINVAL;
298
299 /* offload features support */
300 if (fc->real_dev->features & NETIF_F_SG)
301 lp->sg_supp = 1;
302
303 if (netdev->features & NETIF_F_FCOE_CRC) {
304 lp->crc_offload = 1;
305 FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
306 }
307 if (netdev->features & NETIF_F_FSO) {
308 lp->seq_offload = 1;
309 lp->lso_max = netdev->gso_max_size;
310 FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
311 lp->lso_max);
312 }
313 if (netdev->fcoe_ddp_xid) {
314 lp->lro_enabled = 1;
315 lp->lro_xid = netdev->fcoe_ddp_xid;
316 FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
317 lp->lro_xid);
318 }
319 skb_queue_head_init(&fc->fcoe_pending_queue);
320 fc->fcoe_pending_queue_active = 0;
321 setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
322
323 /* look for SAN MAC address, if multiple SAN MACs exist, only
324 * use the first one for SPMA */
325 rcu_read_lock();
326 for_each_dev_addr(netdev, ha) {
327 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
328 (is_valid_ether_addr(ha->addr))) {
329 memcpy(fc->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
330 fc->ctlr.spma = 1;
331 break;
332 }
333 }
334 rcu_read_unlock();
335
336 /* setup Source Mac Address */
337 if (!fc->ctlr.spma)
338 memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
339 fc->real_dev->addr_len);
340
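	/*
	 * Deriving the WWNs from the physical MAC keeps them stable across
	 * module reloads; the second argument to fcoe_wwn_from_mac() simply
	 * selects distinct encodings for the node name versus the port name.
	 */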
341 wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
342 fc_set_wwnn(lp, wwnn);
343 /* XXX - 3rd arg needs to be vlan id */
344 wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
345 fc_set_wwpn(lp, wwpn);
346
347 /*
348 * Add FCoE MAC address as second unicast MAC address
349 * or enter promiscuous mode if not capable of listening
350 * for multiple unicast MACs.
351 */
352 rtnl_lock();
353 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
354 dev_unicast_add(fc->real_dev, flogi_maddr);
355 if (fc->ctlr.spma)
356 dev_unicast_add(fc->real_dev, fc->ctlr.ctl_src_addr);
357 dev_mc_add(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
358 rtnl_unlock();
359
360 /*
361 * setup the receive function from ethernet driver
362 * on the ethertype for the given device
363 */
364 fc->fcoe_packet_type.func = fcoe_rcv;
365 fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
366 fc->fcoe_packet_type.dev = fc->real_dev;
367 dev_add_pack(&fc->fcoe_packet_type);
368
369 fc->fip_packet_type.func = fcoe_fip_recv;
370 fc->fip_packet_type.type = htons(ETH_P_FIP);
371 fc->fip_packet_type.dev = fc->real_dev;
372 dev_add_pack(&fc->fip_packet_type);
373
374 return 0;
375}
376
377/**
378 * fcoe_shost_config() - Sets up fc_lport->host
379 * @lp : ptr to the fc_lport
380 * @shost : ptr to the associated scsi host
381 * @dev : device associated to scsi host
382 *
383 * Must be called after fcoe_lport_config() and fcoe_netdev_config()
384 *
385 * Returns : 0 for success
386 */
387static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
388 struct device *dev)
389{
390 int rc = 0;
391
392 /* lport scsi host config */
393 lp->host = shost;
394
395 lp->host->max_lun = FCOE_MAX_LUN;
396 lp->host->max_id = FCOE_MAX_FCP_TARGET;
397 lp->host->max_channel = 0;
398 lp->host->transportt = scsi_transport_fcoe_sw;
399
400 /* add the new host to the SCSI-ml */
401 rc = scsi_add_host(lp->host, dev);
402 if (rc) {
403 FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: "
404 "error on scsi_add_host\n");
405 return rc;
406 }
407 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
408 FCOE_NAME, FCOE_VERSION,
409 fcoe_netdev(lp)->name);
410
411 return 0;
412}
413
414/**
415 * fcoe_oem_match() - match function for read-type I/O
416 * @fp: the fc_frame for the new I/O
417 *
418 * Returns : true for read-type I/O, otherwise false.
419 */
420bool fcoe_oem_match(struct fc_frame *fp)
421{
422 return fc_fcp_is_read(fr_fsp(fp));
423}
424
425/**
426 * fcoe_em_config() - allocates em for this lport
427 * @lp: the port that em is to allocated for
428 *
429 * Called with write fcoe_hostlist_lock held.
430 *
431 * Returns : 0 on success
432 */
433static inline int fcoe_em_config(struct fc_lport *lp)
434{
435 struct fcoe_softc *fc = lport_priv(lp);
436 struct fcoe_softc *oldfc = NULL;
437 u16 min_xid = FCOE_MIN_XID;
438 u16 max_xid = FCOE_MAX_XID;
439
440 /*
441 * Check if we need to allocate an em instance for
442 * offload exchange ids to be shared across all VN_PORTs/lport.
443 */
444 if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) {
445 lp->lro_xid = 0;
446 goto skip_oem;
447 }
448
449 /*
450 * Reuse existing offload em instance in case
451 * it is already allocated on phys_dev.
452 */
453 list_for_each_entry(oldfc, &fcoe_hostlist, list) {
454 if (oldfc->phys_dev == fc->phys_dev) {
455 fc->oem = oldfc->oem;
456 break;
457 }
458 }
459
460 if (fc->oem) {
461 if (!fc_exch_mgr_add(lp, fc->oem, fcoe_oem_match)) {
462 printk(KERN_ERR "fcoe_em_config: failed to add "
463 "offload em:%p on interface:%s\n",
464 fc->oem, fc->real_dev->name);
465 return -ENOMEM;
466 }
467 } else {
468 fc->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3,
469 FCOE_MIN_XID, lp->lro_xid,
470 fcoe_oem_match);
471 if (!fc->oem) {
472 printk(KERN_ERR "fcoe_em_config: failed to allocate "
473 "em for offload exches on interface:%s\n",
474 fc->real_dev->name);
475 return -ENOMEM;
476 }
477 }
478
479 /*
480 * Exclude offload EM xid range from next EM xid range.
481 */
482 min_xid += lp->lro_xid + 1;
483
484skip_oem:
485 if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) {
486 printk(KERN_ERR "fcoe_em_config: failed to "
487 "allocate em on interface %s\n", fc->real_dev->name);
488 return -ENOMEM;
489 }
490
491 return 0;
492}
493
494/**
495 * fcoe_if_destroy() - FCoE software HBA tear-down function
496 * @netdev: ptr to the associated net_device
497 *
498 * Returns: 0 on success
499 */
500static int fcoe_if_destroy(struct net_device *netdev)
501{
502 struct fc_lport *lp = NULL;
503 struct fcoe_softc *fc;
504
505 BUG_ON(!netdev);
506
507 FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
508
509 lp = fcoe_hostlist_lookup(netdev);
510 if (!lp)
511 return -ENODEV;
512
513 fc = lport_priv(lp);
514
515 /* Logout of the fabric */
516 fc_fabric_logoff(lp);
517
518 /* Remove the instance from fcoe's list */
519 fcoe_hostlist_remove(lp);
520
521 /* clean up netdev configurations */
522 fcoe_netdev_cleanup(fc);
523
524 /* tear-down the FCoE controller */
525 fcoe_ctlr_destroy(&fc->ctlr);
526
527 /* Free queued packets for the per-CPU receive threads */
528 fcoe_percpu_clean(lp);
529
530 /* Cleanup the fc_lport */
531 fc_lport_destroy(lp);
532 fc_fcp_destroy(lp);
533
534 /* Detach from the scsi-ml */
535 fc_remove_host(lp->host);
536 scsi_remove_host(lp->host);
537
538 /* There are no more rports or I/O, free the EM */
539 fc_exch_mgr_free(lp);
540
541 /* Free existing skbs */
542 fcoe_clean_pending_queue(lp);
543
544 /* Stop the timer */
545 del_timer_sync(&fc->timer);
546
547 /* Free memory used by statistical counters */
548 fc_lport_free_stats(lp);
549
550 /* Release the net_device and Scsi_Host */
551 dev_put(fc->real_dev);
552 scsi_host_put(lp->host);
553
554 return 0;
555}
556
557/**
558 * fcoe_ddp_setup() - calls LLD's ddp_setup through net_device
559 * @lp: the corresponding fc_lport
560 * @xid: the exchange id for this ddp transfer
561 * @sgl: the scatterlist describing this transfer
562 * @sgc: number of sg items
563 *
564 * Returns : 0 if no DDP context was set up
565 */
566static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
567 struct scatterlist *sgl, unsigned int sgc)
568{
569 struct net_device *n = fcoe_netdev(lp);
570
571 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
572 return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
573
574 return 0;
575}
576
577/**
578 * fcoe_ddp_done() - calls LLD's ddp_done through net_device
579 * @lp: the corresponding fc_lport
580 * @xid: the exchange id for this ddp transfer
581 *
582 * Returns : the length of data that has been completed by ddp
583 */
584static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
585{
586 struct net_device *n = fcoe_netdev(lp);
587
588 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
589 return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
590 return 0;
591}
592
593static struct libfc_function_template fcoe_libfc_fcn_templ = {
594 .frame_send = fcoe_xmit,
595 .ddp_setup = fcoe_ddp_setup,
596 .ddp_done = fcoe_ddp_done,
597};
598
599/**
600 * fcoe_if_create() - this function creates the fcoe interface
601 * @netdev: pointer the associated netdevice
602 *
603 * Creates fc_lport struct and scsi_host for lport, configures lport
604 * and starts fabric login.
605 *
606 * Returns : 0 on success
607 */
608static int fcoe_if_create(struct net_device *netdev)
609{
610 int rc;
611 struct fc_lport *lp = NULL;
612 struct fcoe_softc *fc;
613 struct Scsi_Host *shost;
614
615 BUG_ON(!netdev);
616
617 FCOE_NETDEV_DBG(netdev, "Create Interface\n");
618
619 lp = fcoe_hostlist_lookup(netdev);
620 if (lp)
621 return -EEXIST;
622
623 shost = libfc_host_alloc(&fcoe_shost_template,
624 sizeof(struct fcoe_softc));
625 if (!shost) {
626 FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
627 return -ENOMEM;
628 }
629 lp = shost_priv(shost);
630 fc = lport_priv(lp);
631
632 /* configure fc_lport, e.g., em */
633 rc = fcoe_lport_config(lp);
634 if (rc) {
635 FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
636 "interface\n");
637 goto out_host_put;
638 }
639
640 /*
641 * Initialize FIP.
642 */
643 fcoe_ctlr_init(&fc->ctlr);
644 fc->ctlr.send = fcoe_fip_send;
645 fc->ctlr.update_mac = fcoe_update_src_mac;
646
647 /* configure lport network properties */
648 rc = fcoe_netdev_config(lp, netdev);
649 if (rc) {
650 FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
651 "interface\n");
652 goto out_netdev_cleanup;
653 }
654
655 /* configure lport scsi host properties */
656 rc = fcoe_shost_config(lp, shost, &netdev->dev);
657 if (rc) {
658 FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
659 "interface\n");
660 goto out_netdev_cleanup;
661 }
662
663 /* Initialize the library */
664 rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ);
665 if (rc) {
666 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
667 "interface\n");
668 goto out_lp_destroy;
669 }
670
671 /*
672 * fcoe_em_alloc() and fcoe_hostlist_add() both
673 * need to be atomic under fcoe_hostlist_lock
674 * since fcoe_em_alloc() looks for an existing EM
675 * instance on host list updated by fcoe_hostlist_add().
676 */
677 write_lock(&fcoe_hostlist_lock);
678 /* lport exch manager allocation */
679 rc = fcoe_em_config(lp);
680 if (rc) {
681 FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
682 "interface\n");
683 goto out_lp_destroy;
684 }
685
686 /* add to lports list */
687 fcoe_hostlist_add(lp);
688 write_unlock(&fcoe_hostlist_lock);
689
690 lp->boot_time = jiffies;
691
692 fc_fabric_login(lp);
693
694 if (!fcoe_link_ok(lp))
695 fcoe_ctlr_link_up(&fc->ctlr);
696
697 dev_hold(netdev);
698
699 return rc;
700
701out_lp_destroy:
702 fc_exch_mgr_free(lp);
703out_netdev_cleanup:
704 fcoe_netdev_cleanup(fc);
705out_host_put:
706 scsi_host_put(lp->host);
707 return rc;
708}
709
710/**
711 * fcoe_if_init() - attach to scsi transport
712 *
713 * Returns : 0 on success
714 */
715static int __init fcoe_if_init(void)
716{
717 /* attach to scsi transport */
718 scsi_transport_fcoe_sw =
719 fc_attach_transport(&fcoe_transport_function);
720
721 if (!scsi_transport_fcoe_sw) {
722 printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
723 return -ENODEV;
724 }
725
726 return 0;
727}
728
729/**
730 * fcoe_if_exit() - detach from scsi transport
731 *
732 * Returns : 0 on success
733 */
734int __exit fcoe_if_exit(void)
735{
736 fc_release_transport(scsi_transport_fcoe_sw);
737 return 0;
738}
739
740/**
741 * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
742 * @cpu: cpu index for the online cpu
743 */
744static void fcoe_percpu_thread_create(unsigned int cpu)
745{
746 struct fcoe_percpu_s *p;
747 struct task_struct *thread;
748
749 p = &per_cpu(fcoe_percpu, cpu);
750
751 thread = kthread_create(fcoe_percpu_receive_thread,
752 (void *)p, "fcoethread/%d", cpu);
753
754 if (likely(!IS_ERR(thread))) {
755 kthread_bind(thread, cpu);
756 wake_up_process(thread);
757
758 spin_lock_bh(&p->fcoe_rx_list.lock);
759 p->thread = thread;
760 spin_unlock_bh(&p->fcoe_rx_list.lock);
761 }
762}
763
764/**
765 * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
766 * @cpu: cpu index the rx thread is to be removed
767 *
768 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
769 * current CPU's Rx thread. If the thread being destroyed is bound to
770 * the CPU processing this context the skbs will be freed.
771 */
772static void fcoe_percpu_thread_destroy(unsigned int cpu)
773{
774 struct fcoe_percpu_s *p;
775 struct task_struct *thread;
776 struct page *crc_eof;
777 struct sk_buff *skb;
778#ifdef CONFIG_SMP
779 struct fcoe_percpu_s *p0;
780 unsigned targ_cpu = smp_processor_id();
781#endif /* CONFIG_SMP */
782
783 FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
784
785 /* Prevent any new skbs from being queued for this CPU. */
786 p = &per_cpu(fcoe_percpu, cpu);
787 spin_lock_bh(&p->fcoe_rx_list.lock);
788 thread = p->thread;
789 p->thread = NULL;
790 crc_eof = p->crc_eof_page;
791 p->crc_eof_page = NULL;
792 p->crc_eof_offset = 0;
793 spin_unlock_bh(&p->fcoe_rx_list.lock);
794
795#ifdef CONFIG_SMP
796 /*
797 * Don't bother moving the skb's if this context is running
798 * on the same CPU that is having its thread destroyed. This
799 * can easily happen when the module is removed.
800 */
801 if (cpu != targ_cpu) {
802 p0 = &per_cpu(fcoe_percpu, targ_cpu);
803 spin_lock_bh(&p0->fcoe_rx_list.lock);
804 if (p0->thread) {
805 FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
806 cpu, targ_cpu);
807
808 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
809 __skb_queue_tail(&p0->fcoe_rx_list, skb);
810 spin_unlock_bh(&p0->fcoe_rx_list.lock);
811 } else {
812 /*
813 * The targeted CPU is not initialized and cannot accept
814 * new skbs. Unlock the targeted CPU and drop the skbs
815 * on the CPU that is going offline.
816 */
817 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
818 kfree_skb(skb);
819 spin_unlock_bh(&p0->fcoe_rx_list.lock);
820 }
821 } else {
822 /*
823 * This scenario occurs when the module is being removed
824 * and all threads are being destroyed. skbs will continue
825 * to be shifted from the CPU thread that is being removed
826 * to the CPU thread associated with the CPU that is processing
827 * the module removal. Once there is only one CPU Rx thread it
828 * will reach this case and we will drop all skbs and later
829 * stop the thread.
830 */
831 spin_lock_bh(&p->fcoe_rx_list.lock);
832 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
833 kfree_skb(skb);
834 spin_unlock_bh(&p->fcoe_rx_list.lock);
835 }
836#else
837 /*
838 * This is a non-SMP scenario where the singular Rx thread is
839 * being removed. Free all skbs and stop the thread.
840 */
841 spin_lock_bh(&p->fcoe_rx_list.lock);
842 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
843 kfree_skb(skb);
844 spin_unlock_bh(&p->fcoe_rx_list.lock);
845#endif
846
847 if (thread)
848 kthread_stop(thread);
849
850 if (crc_eof)
851 put_page(crc_eof);
852}
853
854/**
855 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
856 * @nfb: callback data block
857 * @action: event triggering the callback
858 * @hcpu: index for the cpu of this event
859 *
860 * This creates or destroys per cpu data for fcoe
861 *
862 * Returns NOTIFY_OK always.
863 */
864static int fcoe_cpu_callback(struct notifier_block *nfb,
865 unsigned long action, void *hcpu)
866{
867 unsigned cpu = (unsigned long)hcpu;
868
869 switch (action) {
870 case CPU_ONLINE:
871 case CPU_ONLINE_FROZEN:
872 FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
873 fcoe_percpu_thread_create(cpu);
874 break;
875 case CPU_DEAD:
876 case CPU_DEAD_FROZEN:
877 FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
878 fcoe_percpu_thread_destroy(cpu);
879 break;
880 default:
881 break;
882 }
883 return NOTIFY_OK;
884}
885
886static struct notifier_block fcoe_cpu_notifier = {
887 .notifier_call = fcoe_cpu_callback,
888};
889
890/**
891 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
892 * @skb: the receive skb
893 * @dev: associated net device
894 * @ptype: context
895 * @olddev: last device
896 *
897 * This function receives the packet, builds an FC frame, and passes it up.
898 *
899 * Returns: 0 for success
900 */
901int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
902 struct packet_type *ptype, struct net_device *olddev)
903{
904 struct fc_lport *lp;
905 struct fcoe_rcv_info *fr;
906 struct fcoe_softc *fc;
907 struct fc_frame_header *fh;
908 struct fcoe_percpu_s *fps;
909 unsigned short oxid;
910 unsigned int cpu = 0;
911
912 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
913 lp = fc->ctlr.lp;
914 if (unlikely(lp == NULL)) {
915 FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
916 goto err2;
917 }
918 if (!lp->link_up)
919 goto err2;
920
921 FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p "
922 "data:%p tail:%p end:%p sum:%d dev:%s",
923 skb->len, skb->data_len, skb->head, skb->data,
924 skb_tail_pointer(skb), skb_end_pointer(skb),
925 skb->csum, skb->dev ? skb->dev->name : "<NULL>");
926
927 /* check for FCOE packet type */
928 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
929 FCOE_NETDEV_DBG(dev, "Wrong FC type frame");
930 goto err;
931 }
932
933 /*
934 * Check for minimum frame length, and make sure required FCoE
935 * and FC headers are pulled into the linear data area.
936 */
937 if (unlikely((skb->len < FCOE_MIN_FRAME) ||
938 !pskb_may_pull(skb, FCOE_HEADER_LEN)))
939 goto err;
940
941 skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
942 fh = (struct fc_frame_header *) skb_transport_header(skb);
943
944 oxid = ntohs(fh->fh_ox_id);
945
946 fr = fcoe_dev_from_skb(skb);
947 fr->fr_dev = lp;
948 fr->ptype = ptype;
949
950#ifdef CONFIG_SMP
951 /*
952 * The incoming frame exchange id(oxid) is ANDed with num of online
953 * cpu bits to get cpu and then this cpu is used for selecting
954 * a per cpu kernel thread from fcoe_percpu.
955 */
956 cpu = oxid & (num_online_cpus() - 1);
957#endif
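	/*
	 * Selecting the CPU from the OX_ID keeps all frames of a given
	 * exchange on the same receive thread, preserving per-exchange
	 * ordering.
	 */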
958
959 fps = &per_cpu(fcoe_percpu, cpu);
960 spin_lock_bh(&fps->fcoe_rx_list.lock);
961 if (unlikely(!fps->thread)) {
962 /*
963 * The targeted CPU is not ready, let's target
964 * the first CPU now. For non-SMP systems this
965 * will check the same CPU twice.
966 */
967 FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread "
968 "ready for incoming skb- using first online "
969 "CPU.\n");
970
971 spin_unlock_bh(&fps->fcoe_rx_list.lock);
972 cpu = first_cpu(cpu_online_map);
973 fps = &per_cpu(fcoe_percpu, cpu);
974 spin_lock_bh(&fps->fcoe_rx_list.lock);
975 if (!fps->thread) {
976 spin_unlock_bh(&fps->fcoe_rx_list.lock);
977 goto err;
978 }
979 }
980
981 /*
982 * We now have a valid CPU that we're targeting for
983 * this skb. We also have this receive thread locked,
984 * so we're free to queue skbs into its queue.
985 */
986 __skb_queue_tail(&fps->fcoe_rx_list, skb);
987 if (fps->fcoe_rx_list.qlen == 1)
988 wake_up_process(fps->thread);
989
990 spin_unlock_bh(&fps->fcoe_rx_list.lock);
991
992 return 0;
993err:
994 fc_lport_get_stats(lp)->ErrorFrames++;
995
996err2:
997 kfree_skb(skb);
998 return -1;
999}
1000
1001/**
1002 * fcoe_start_io() - pass to netdev to start xmit for fcoe
1003 * @skb: the skb to be xmitted
1004 *
1005 * Returns: 0 for success
1006 */
1007static inline int fcoe_start_io(struct sk_buff *skb)
1008{
1009 int rc;
1010
1011 skb_get(skb);
1012 rc = dev_queue_xmit(skb);
1013 if (rc != 0)
1014 return rc;
1015 kfree_skb(skb);
1016 return 0;
1017}
1018
1019/**
1020 * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof
1021 * @skb: the skb to be xmitted
1022 * @tlen: total len
1023 *
1024 * Returns: 0 for success
1025 */
1026static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
1027{
1028 struct fcoe_percpu_s *fps;
1029 struct page *page;
1030
1031 fps = &get_cpu_var(fcoe_percpu);
1032 page = fps->crc_eof_page;
1033 if (!page) {
1034 page = alloc_page(GFP_ATOMIC);
1035 if (!page) {
1036 put_cpu_var(fcoe_percpu);
1037 return -ENOMEM;
1038 }
1039 fps->crc_eof_page = page;
1040 fps->crc_eof_offset = 0;
1041 }
1042
1043 get_page(page);
1044 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
1045 fps->crc_eof_offset, tlen);
1046 skb->len += tlen;
1047 skb->data_len += tlen;
1048 skb->truesize += tlen;
1049 fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
1050
1051 if (fps->crc_eof_offset >= PAGE_SIZE) {
1052 fps->crc_eof_page = NULL;
1053 fps->crc_eof_offset = 0;
1054 put_page(page);
1055 }
1056 put_cpu_var(fcoe_percpu);
1057 return 0;
1058}
1059
1060/**
1061 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
1062 * @fp: the fc_frame containing data to be checksummed
1063 *
1064 * This uses crc32() to calculate the crc for fc frame
1065 * Return : 32 bit crc
1066 */
1067u32 fcoe_fc_crc(struct fc_frame *fp)
1068{
1069 struct sk_buff *skb = fp_skb(fp);
1070 struct skb_frag_struct *frag;
1071 unsigned char *data;
1072 unsigned long off, len, clen;
1073 u32 crc;
1074 unsigned i;
1075
1076 crc = crc32(~0, skb->data, skb_headlen(skb));
1077
1078 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1079 frag = &skb_shinfo(skb)->frags[i];
1080 off = frag->page_offset;
1081 len = frag->size;
1082 while (len > 0) {
1083 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
1084 data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
1085 KM_SKB_DATA_SOFTIRQ);
1086 crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
1087 kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
1088 off += clen;
1089 len -= clen;
1090 }
1091 }
1092 return crc;
1093}
1094
1095/**
1096 * fcoe_xmit() - FCoE frame transmit function
1097 * @lp: the associated local port
1098 * @fp: the fc_frame to be transmitted
1099 *
1100 * Return : 0 for success
1101 */
1102int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1103{
1104 int wlen;
1105 u32 crc;
1106 struct ethhdr *eh;
1107 struct fcoe_crc_eof *cp;
1108 struct sk_buff *skb;
1109 struct fcoe_dev_stats *stats;
1110 struct fc_frame_header *fh;
1111 unsigned int hlen; /* header length implies the version */
1112 unsigned int tlen; /* trailer length */
1113 unsigned int elen; /* eth header, may include vlan */
1114 struct fcoe_softc *fc;
1115 u8 sof, eof;
1116 struct fcoe_hdr *hp;
1117
1118 WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1119
1120 fc = lport_priv(lp);
1121 fh = fc_frame_header_get(fp);
1122 skb = fp_skb(fp);
1123 wlen = skb->len / FCOE_WORD_TO_BYTE;
1124
1125 if (!lp->link_up) {
1126 kfree_skb(skb);
1127 return 0;
1128 }
1129
1130 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1131 fcoe_ctlr_els_send(&fc->ctlr, skb))
1132 return 0;
1133
1134 sof = fr_sof(fp);
1135 eof = fr_eof(fp);
1136
1137 elen = sizeof(struct ethhdr);
1138 hlen = sizeof(struct fcoe_hdr);
1139 tlen = sizeof(struct fcoe_crc_eof);
1140 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1141
1142 /* crc offload */
1143 if (likely(lp->crc_offload)) {
1144 skb->ip_summed = CHECKSUM_PARTIAL;
1145 skb->csum_start = skb_headroom(skb);
1146 skb->csum_offset = skb->len;
1147 crc = 0;
1148 } else {
1149 skb->ip_summed = CHECKSUM_NONE;
1150 crc = fcoe_fc_crc(fp);
1151 }
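	/*
	 * With CHECKSUM_PARTIAL the FC CRC is left for a NETIF_F_FCOE_CRC
	 * capable NIC to insert; otherwise it was just computed in software
	 * by fcoe_fc_crc() and is placed in the trailer below.
	 */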
1152
1153 /* copy fc crc and eof to the skb buff */
1154 if (skb_is_nonlinear(skb)) {
1155 skb_frag_t *frag;
1156 if (fcoe_get_paged_crc_eof(skb, tlen)) {
1157 kfree_skb(skb);
1158 return -ENOMEM;
1159 }
1160 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1161 cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
1162 + frag->page_offset;
1163 } else {
1164 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1165 }
1166
1167 memset(cp, 0, sizeof(*cp));
1168 cp->fcoe_eof = eof;
1169 cp->fcoe_crc32 = cpu_to_le32(~crc);
1170
1171 if (skb_is_nonlinear(skb)) {
1172 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1173 cp = NULL;
1174 }
1175
1176 /* adjust skb network/transport offsets to match mac/fcoe/fc */
1177 skb_push(skb, elen + hlen);
1178 skb_reset_mac_header(skb);
1179 skb_reset_network_header(skb);
1180 skb->mac_len = elen;
1181 skb->protocol = htons(ETH_P_FCOE);
1182 skb->dev = fc->real_dev;
1183
1184 /* fill up mac and fcoe headers */
1185 eh = eth_hdr(skb);
1186 eh->h_proto = htons(ETH_P_FCOE);
1187 if (fc->ctlr.map_dest)
1188 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1189 else
1190 /* insert GW address */
1191 memcpy(eh->h_dest, fc->ctlr.dest_addr, ETH_ALEN);
1192
1193 if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1194 memcpy(eh->h_source, fc->ctlr.ctl_src_addr, ETH_ALEN);
1195 else
1196 memcpy(eh->h_source, fc->ctlr.data_src_addr, ETH_ALEN);
1197
1198 hp = (struct fcoe_hdr *)(eh + 1);
1199 memset(hp, 0, sizeof(*hp));
1200 if (FC_FCOE_VER)
1201 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1202 hp->fcoe_sof = sof;
1203
1204 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1205 if (lp->seq_offload && fr_max_payload(fp)) {
1206 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1207 skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1208 } else {
1209 skb_shinfo(skb)->gso_type = 0;
1210 skb_shinfo(skb)->gso_size = 0;
1211 }
1212 /* update tx stats: regardless if LLD fails */
1213 stats = fc_lport_get_stats(lp);
1214 stats->TxFrames++;
1215 stats->TxWords += wlen;
1216
1217 /* send down to lld */
1218 fr_dev(fp) = lp;
1219 if (fc->fcoe_pending_queue.qlen)
1220 fcoe_check_wait_queue(lp, skb);
1221 else if (fcoe_start_io(skb))
1222 fcoe_check_wait_queue(lp, skb);
1223
1224 return 0;
1225}
1226
1227/**
1228 * fcoe_percpu_receive_thread() - recv thread per cpu
1229 * @arg: ptr to the fcoe per cpu struct
1230 *
1231 * Return: 0 for success
1232 */
1233int fcoe_percpu_receive_thread(void *arg)
1234{
1235 struct fcoe_percpu_s *p = arg;
1236 u32 fr_len;
1237 struct fc_lport *lp;
1238 struct fcoe_rcv_info *fr;
1239 struct fcoe_dev_stats *stats;
1240 struct fc_frame_header *fh;
1241 struct sk_buff *skb;
1242 struct fcoe_crc_eof crc_eof;
1243 struct fc_frame *fp;
1244 u8 *mac = NULL;
1245 struct fcoe_softc *fc;
1246 struct fcoe_hdr *hp;
1247
1248 set_user_nice(current, -20);
1249
1250 while (!kthread_should_stop()) {
1251
1252 spin_lock_bh(&p->fcoe_rx_list.lock);
1253 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
1254 set_current_state(TASK_INTERRUPTIBLE);
1255 spin_unlock_bh(&p->fcoe_rx_list.lock);
1256 schedule();
1257 set_current_state(TASK_RUNNING);
1258 if (kthread_should_stop())
1259 return 0;
1260 spin_lock_bh(&p->fcoe_rx_list.lock);
1261 }
1262 spin_unlock_bh(&p->fcoe_rx_list.lock);
1263 fr = fcoe_dev_from_skb(skb);
1264 lp = fr->fr_dev;
1265 if (unlikely(lp == NULL)) {
1266 FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure");
1267 kfree_skb(skb);
1268 continue;
1269 }
1270
1271 FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1272 "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1273 skb->len, skb->data_len,
1274 skb->head, skb->data, skb_tail_pointer(skb),
1275 skb_end_pointer(skb), skb->csum,
1276 skb->dev ? skb->dev->name : "<NULL>");
1277
1278 /*
1279 * Save source MAC address before discarding header.
1280 */
1281 fc = lport_priv(lp);
1282 if (skb_is_nonlinear(skb))
1283 skb_linearize(skb); /* not ideal */
1284 mac = eth_hdr(skb)->h_source;
1285
1286 /*
1287 * Frame length checks and setting up the header pointers
1288 * was done in fcoe_rcv already.
1289 */
1290 hp = (struct fcoe_hdr *) skb_network_header(skb);
1291 fh = (struct fc_frame_header *) skb_transport_header(skb);
1292
1293 stats = fc_lport_get_stats(lp);
1294 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1295 if (stats->ErrorFrames < 5)
1296 printk(KERN_WARNING "fcoe: FCoE version "
1297 "mismatch: The frame has "
1298 "version %x, but the "
1299 "initiator supports version "
1300 "%x\n", FC_FCOE_DECAPS_VER(hp),
1301 FC_FCOE_VER);
1302 stats->ErrorFrames++;
1303 kfree_skb(skb);
1304 continue;
1305 }
1306
1307 skb_pull(skb, sizeof(struct fcoe_hdr));
1308 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1309
1310 stats->RxFrames++;
1311 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1312
1313 fp = (struct fc_frame *)skb;
1314 fc_frame_init(fp);
1315 fr_dev(fp) = lp;
1316 fr_sof(fp) = hp->fcoe_sof;
1317
1318 /* Copy out the CRC and EOF trailer for access */
1319 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
1320 kfree_skb(skb);
1321 continue;
1322 }
1323 fr_eof(fp) = crc_eof.fcoe_eof;
1324 fr_crc(fp) = crc_eof.fcoe_crc32;
1325 if (pskb_trim(skb, fr_len)) {
1326 kfree_skb(skb);
1327 continue;
1328 }
1329
1330 /*
1331 * We only check the CRC here if no offload is available and the
1332 * frame is not solicited FCP data; for solicited data the FCP
1333 * layer checks it during the copy.
1334 */
1335 if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
1336 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1337 else
1338 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1339
1340 fh = fc_frame_header_get(fp);
1341 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
1342 fh->fh_type == FC_TYPE_FCP) {
1343 fc_exch_recv(lp, fp);
1344 continue;
1345 }
1346 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1347 if (le32_to_cpu(fr_crc(fp)) !=
1348 ~crc32(~0, skb->data, fr_len)) {
1349 if (stats->InvalidCRCCount < 5)
1350 printk(KERN_WARNING "fcoe: dropping "
1351 "frame with CRC error\n");
1352 stats->InvalidCRCCount++;
1353 stats->ErrorFrames++;
1354 fc_frame_free(fp);
1355 continue;
1356 }
1357 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1358 }
1359 if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
1360 fcoe_ctlr_recv_flogi(&fc->ctlr, fp, mac)) {
1361 fc_frame_free(fp);
1362 continue;
1363 }
1364 fc_exch_recv(lp, fp);
1365 }
1366 return 0;
1367}
1368
1369/**
1370 * fcoe_check_wait_queue() - attempt to clear the transmit backlog
1371 * @lp: the fc_lport
1372 *
1373 * This empties the wait_queue, dequeuing the head of the wait_queue
1374 * and calling fcoe_start_io() for each packet; if a transmit fails,
1375 * the remaining skbs are put back on the wait_queue to be retried
1376 * later.
1377 *
1378 * The wait_queue is used when the skb transmit fails. skb will go
1379 * in the wait_queue which will be emptied by the timer function or
1380 * by the next skb transmit.
1381 */
1382static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
1383{
1384 struct fcoe_softc *fc = lport_priv(lp);
1385 int rc;
1386
1387 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1388
1389 if (skb)
1390 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1391
1392 if (fc->fcoe_pending_queue_active)
1393 goto out;
1394 fc->fcoe_pending_queue_active = 1;
1395
1396 while (fc->fcoe_pending_queue.qlen) {
1397 /* keep qlen > 0 until fcoe_start_io succeeds */
1398 fc->fcoe_pending_queue.qlen++;
1399 skb = __skb_dequeue(&fc->fcoe_pending_queue);
1400
1401 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1402 rc = fcoe_start_io(skb);
1403 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1404
1405 if (rc) {
1406 __skb_queue_head(&fc->fcoe_pending_queue, skb);
1407 /* undo temporary increment above */
1408 fc->fcoe_pending_queue.qlen--;
1409 break;
1410 }
1411 /* undo temporary increment above */
1412 fc->fcoe_pending_queue.qlen--;
1413 }
1414
1415 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1416 lp->qfull = 0;
1417 if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
1418 mod_timer(&fc->timer, jiffies + 2);
1419 fc->fcoe_pending_queue_active = 0;
1420out:
1421 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1422 lp->qfull = 1;
1423 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1424 return;
1425}
1426
1427/**
1428 * fcoe_dev_setup() - setup link change notification interface
1429 */
1430static void fcoe_dev_setup(void)
1431{
1432 register_netdevice_notifier(&fcoe_notifier);
1433}
1434
1435/**
1436 * fcoe_dev_cleanup() - cleanup link change notification interface
1437 */
1438static void fcoe_dev_cleanup(void)
1439{
1440 unregister_netdevice_notifier(&fcoe_notifier);
1441}
1442
1443/**
1444 * fcoe_device_notification() - netdev event notification callback
1445 * @notifier: context of the notification
1446 * @event: type of event
1447 * @ptr: fixed array for output parsed ifname
1448 *
1449 * This function is called by the ethernet driver in case of link change event
1450 *
1451 * Returns: 0 for success
1452 */
1453static int fcoe_device_notification(struct notifier_block *notifier,
1454 ulong event, void *ptr)
1455{
1456 struct fc_lport *lp = NULL;
1457 struct net_device *real_dev = ptr;
1458 struct fcoe_softc *fc;
1459 struct fcoe_dev_stats *stats;
1460 u32 link_possible = 1;
1461 u32 mfs;
1462 int rc = NOTIFY_OK;
1463
1464 read_lock(&fcoe_hostlist_lock);
1465 list_for_each_entry(fc, &fcoe_hostlist, list) {
1466 if (fc->real_dev == real_dev) {
1467 lp = fc->ctlr.lp;
1468 break;
1469 }
1470 }
1471 read_unlock(&fcoe_hostlist_lock);
1472 if (lp == NULL) {
1473 rc = NOTIFY_DONE;
1474 goto out;
1475 }
1476
1477 switch (event) {
1478 case NETDEV_DOWN:
1479 case NETDEV_GOING_DOWN:
1480 link_possible = 0;
1481 break;
1482 case NETDEV_UP:
1483 case NETDEV_CHANGE:
1484 break;
1485 case NETDEV_CHANGEMTU:
1486 mfs = fc->real_dev->mtu -
1487 (sizeof(struct fcoe_hdr) +
1488 sizeof(struct fcoe_crc_eof));
1489 if (mfs >= FC_MIN_MAX_FRAME)
1490 fc_set_mfs(lp, mfs);
1491 break;
1492 case NETDEV_REGISTER:
1493 break;
1494 default:
1495 FCOE_NETDEV_DBG(real_dev, "Unknown event %ld "
1496 "from netdev netlink\n", event);
1497 }
1498 if (link_possible && !fcoe_link_ok(lp))
1499 fcoe_ctlr_link_up(&fc->ctlr);
1500 else if (fcoe_ctlr_link_down(&fc->ctlr)) {
1501 stats = fc_lport_get_stats(lp);
1502 stats->LinkFailureCount++;
1503 fcoe_clean_pending_queue(lp);
1504 }
1505out:
1506 return rc;
1507}
1508
1509/**
1510 * fcoe_if_to_netdev() - parse a name buffer to get netdev
1511 * @buffer: incoming buffer to be copied
1512 *
1513 * Returns: NULL or ptr to net_device
1514 */
1515static struct net_device *fcoe_if_to_netdev(const char *buffer)
1516{
1517 char *cp;
1518 char ifname[IFNAMSIZ + 2];
1519
1520 if (buffer) {
1521 strlcpy(ifname, buffer, IFNAMSIZ);
1522 cp = ifname + strlen(ifname);
1523 while (--cp >= ifname && *cp == '\n')
1524 *cp = '\0';
1525 return dev_get_by_name(&init_net, ifname);
1526 }
1527 return NULL;
1528}
1529
1530/**
1531 * fcoe_netdev_to_module_owner() - finds out the driver module of the netdev
1532 * @netdev: the target netdev
1533 *
1534 * Returns: ptr to the struct module, NULL for failure
1535 */
1536static struct module *
1537fcoe_netdev_to_module_owner(const struct net_device *netdev)
1538{
1539 struct device *dev;
1540
1541 if (!netdev)
1542 return NULL;
1543
1544 dev = netdev->dev.parent;
1545 if (!dev)
1546 return NULL;
1547
1548 if (!dev->driver)
1549 return NULL;
1550
1551 return dev->driver->owner;
1552}
1553
1554/**
1555 * fcoe_ethdrv_get() - Hold the Ethernet driver
1556 * @netdev: the target netdev
1557 *
1558 * Holds the Ethernet driver module by try_module_get() for
1559 * the corresponding netdev.
1560 *
1561 * Returns: non-zero for success
1562 */
1563static int fcoe_ethdrv_get(const struct net_device *netdev)
1564{
1565 struct module *owner;
1566
1567 owner = fcoe_netdev_to_module_owner(netdev);
1568 if (owner) {
1569 FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n",
1570 module_name(owner));
1571 return try_module_get(owner);
1572 }
1573 return -ENODEV;
1574}
1575
1576/**
1577 * fcoe_ethdrv_put() - Release the Ethernet driver
1578 * @netdev: the target netdev
1579 *
1580 * Releases the Ethernet driver module by module_put for
1581 * the corresponding netdev.
1582 *
1583 * Returns: 0 for success
1584 */
1585static int fcoe_ethdrv_put(const struct net_device *netdev)
1586{
1587 struct module *owner;
1588
1589 owner = fcoe_netdev_to_module_owner(netdev);
1590 if (owner) {
1591 FCOE_NETDEV_DBG(netdev, "Release driver module %s\n",
1592 module_name(owner));
1593 module_put(owner);
1594 return 0;
1595 }
1596 return -ENODEV;
1597}
1598
1599/**
1600 * fcoe_destroy() - handles the destroy from sysfs
1601 * @buffer: expected to be an eth if name
1602 * @kp: associated kernel param
1603 *
1604 * Returns: 0 for success
1605 */
1606static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1607{
1608 int rc;
1609 struct net_device *netdev;
1610
1611 netdev = fcoe_if_to_netdev(buffer);
1612 if (!netdev) {
1613 rc = -ENODEV;
1614 goto out_nodev;
1615 }
1616 /* look for existing lport */
1617 if (!fcoe_hostlist_lookup(netdev)) {
1618 rc = -ENODEV;
1619 goto out_putdev;
1620 }
1621 rc = fcoe_if_destroy(netdev);
1622 if (rc) {
1623 printk(KERN_ERR "fcoe: Failed to destroy interface (%s)\n",
1624 netdev->name);
1625 rc = -EIO;
1626 goto out_putdev;
1627 }
1628 fcoe_ethdrv_put(netdev);
1629 rc = 0;
1630out_putdev:
1631 dev_put(netdev);
1632out_nodev:
1633 return rc;
1634}
1635
1636/**
1637 * fcoe_create() - Handles the create call from sysfs
1638 * @buffer: expected to be an eth if name
1639 * @kp: associated kernel param
1640 *
1641 * Returns: 0 for success
1642 */
1643static int fcoe_create(const char *buffer, struct kernel_param *kp)
1644{
1645 int rc;
1646 struct net_device *netdev;
1647
1648 netdev = fcoe_if_to_netdev(buffer);
1649 if (!netdev) {
1650 rc = -ENODEV;
1651 goto out_nodev;
1652 }
1653 /* look for existing lport */
1654 if (fcoe_hostlist_lookup(netdev)) {
1655 rc = -EEXIST;
1656 goto out_putdev;
1657 }
1658 fcoe_ethdrv_get(netdev);
1659
1660 rc = fcoe_if_create(netdev);
1661 if (rc) {
1662 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
1663 netdev->name);
1664 fcoe_ethdrv_put(netdev);
1665 rc = -EIO;
1666 goto out_putdev;
1667 }
1668 rc = 0;
1669out_putdev:
1670 dev_put(netdev);
1671out_nodev:
1672 return rc;
1673}
1674
1675module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
1676__MODULE_PARM_TYPE(create, "string");
1677MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
1678module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
1679__MODULE_PARM_TYPE(destroy, "string");
1680MODULE_PARM_DESC(destroy, "Destroy fcoe port");
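/*
 * Example usage of the create/destroy parameters (assuming the module is
 * loaded under its default name "fcoe"):
 *   echo eth0 > /sys/module/fcoe/parameters/create
 *   echo eth0 > /sys/module/fcoe/parameters/destroy
 * The interface name written here is parsed by fcoe_if_to_netdev() above.
 */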
1681
1682/**
1683 * fcoe_link_ok() - Check if link is ok for the fc_lport
1684 * @lp: ptr to the fc_lport
1685 *
1686 * Any permanently-disqualifying conditions have been previously checked.
1687 * This also updates the speed setting, which may change with link for 100/1000.
1688 *
1689 * This function should probably be checking for PAUSE support at some point
1690 * in the future. Currently Per-priority-pause is not determinable using
1691 * ethtool, so we shouldn't be restrictive until that problem is resolved.
1692 *
1693 * Returns: 0 if link is OK for use by FCoE.
1694 *
1695 */
1696int fcoe_link_ok(struct fc_lport *lp)
1697{
1698 struct fcoe_softc *fc = lport_priv(lp);
1699 struct net_device *dev = fc->real_dev;
1700 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1701
1702 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
1703 (!dev_ethtool_get_settings(dev, &ecmd))) {
1704 lp->link_supported_speeds &=
1705 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1706 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1707 SUPPORTED_1000baseT_Full))
1708 lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1709 if (ecmd.supported & SUPPORTED_10000baseT_Full)
1710 lp->link_supported_speeds |=
1711 FC_PORTSPEED_10GBIT;
1712 if (ecmd.speed == SPEED_1000)
1713 lp->link_speed = FC_PORTSPEED_1GBIT;
1714 if (ecmd.speed == SPEED_10000)
1715 lp->link_speed = FC_PORTSPEED_10GBIT;
1716
1717 return 0;
1718 }
1719 return -1;
1720}
1721
1722/**
1723 * fcoe_percpu_clean() - Clear the pending skbs for an lport
1724 * @lp: the fc_lport
1725 */
1726void fcoe_percpu_clean(struct fc_lport *lp)
1727{
1728 struct fcoe_percpu_s *pp;
1729 struct fcoe_rcv_info *fr;
1730 struct sk_buff_head *list;
1731 struct sk_buff *skb, *next;
1732 struct sk_buff *head;
1733 unsigned int cpu;
1734
1735 for_each_possible_cpu(cpu) {
1736 pp = &per_cpu(fcoe_percpu, cpu);
1737 spin_lock_bh(&pp->fcoe_rx_list.lock);
1738 list = &pp->fcoe_rx_list;
1739 head = list->next;
1740 for (skb = head; skb != (struct sk_buff *)list;
1741 skb = next) {
1742 next = skb->next;
1743 fr = fcoe_dev_from_skb(skb);
1744 if (fr->fr_dev == lp) {
1745 __skb_unlink(skb, list);
1746 kfree_skb(skb);
1747 }
1748 }
1749 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1750 }
1751}
1752
1753/**
1754 * fcoe_clean_pending_queue() - Dequeue a skb and free it
1755 * @lp: the corresponding fc_lport
1756 *
1757 * Returns: none
1758 */
1759void fcoe_clean_pending_queue(struct fc_lport *lp)
1760{
1761 struct fcoe_softc *fc = lport_priv(lp);
1762 struct sk_buff *skb;
1763
1764 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1765 while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
1766 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1767 kfree_skb(skb);
1768 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1769 }
1770 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1771}
1772
1773/**
1774 * fcoe_reset() - Resets the fcoe
1775 * @shost: shost the reset is from
1776 *
1777 * Returns: always 0
1778 */
1779int fcoe_reset(struct Scsi_Host *shost)
1780{
1781 struct fc_lport *lport = shost_priv(shost);
1782 fc_lport_reset(lport);
1783 return 0;
1784}
1785
1786/**
1787 * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device
1788 * @dev: this is currently ptr to net_device
1789 *
1790 * Called with fcoe_hostlist_lock held.
1791 *
1792 * Returns: NULL or the located fcoe_softc
1793 */
1794static struct fcoe_softc *
1795fcoe_hostlist_lookup_softc(const struct net_device *dev)
1796{
1797 struct fcoe_softc *fc;
1798
1799 list_for_each_entry(fc, &fcoe_hostlist, list) {
1800 if (fc->real_dev == dev)
1801 return fc;
1802 }
1803 return NULL;
1804}
1805
1806/**
1807 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
1808 * @netdev: ptr to net_device
1809 *
1810 * Returns: the matching fc_lport, or NULL if none is found
1811 */
1812struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1813{
1814 struct fcoe_softc *fc;
1815
1816 read_lock(&fcoe_hostlist_lock);
1817 fc = fcoe_hostlist_lookup_softc(netdev);
1818 read_unlock(&fcoe_hostlist_lock);
1819
1820 return (fc) ? fc->ctlr.lp : NULL;
1821}
1822
1823/**
1824 * fcoe_hostlist_add() - Add a lport to lports list
1825 * @lp: ptr to the fc_lport to be added
1826 *
1827 * Called with write fcoe_hostlist_lock held.
1828 *
1829 * Returns: 0 for success
1830 */
1831int fcoe_hostlist_add(const struct fc_lport *lp)
1832{
1833 struct fcoe_softc *fc;
1834
1835 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1836 if (!fc) {
1837 fc = lport_priv(lp);
1838 list_add_tail(&fc->list, &fcoe_hostlist);
1839 }
1840 return 0;
1841}
1842
1843/**
1844 * fcoe_hostlist_remove() - remove a lport from lports list
1845 * @lp: ptr to the fc_lport to be removed
1846 *
1847 * Returns: 0 for success
1848 */
1849int fcoe_hostlist_remove(const struct fc_lport *lp)
1850{
1851 struct fcoe_softc *fc;
1852
1853 write_lock_bh(&fcoe_hostlist_lock);
1854 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1855 BUG_ON(!fc);
1856 list_del(&fc->list);
1857 write_unlock_bh(&fcoe_hostlist_lock);
1858
1859 return 0;
1860}
1861
1862/**
1863 * fcoe_init() - fcoe module loading initialization
1864 *
1865 * Returns 0 on success, negative on failure
1866 */
1867static int __init fcoe_init(void)
1868{
1869 unsigned int cpu;
1870 int rc = 0;
1871 struct fcoe_percpu_s *p;
1872
1873 INIT_LIST_HEAD(&fcoe_hostlist);
1874 rwlock_init(&fcoe_hostlist_lock);
1875
1876 for_each_possible_cpu(cpu) {
1877 p = &per_cpu(fcoe_percpu, cpu);
1878 skb_queue_head_init(&p->fcoe_rx_list);
1879 }
1880
1881 for_each_online_cpu(cpu)
1882 fcoe_percpu_thread_create(cpu);
1883
1884 /* Initialize per CPU interrupt thread */
1885 rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
1886 if (rc)
1887 goto out_free;
1888
1889 /* Setup link change notification */
1890 fcoe_dev_setup();
1891
1892 fcoe_if_init();
1893
1894 return 0;
1895
1896out_free:
1897 for_each_online_cpu(cpu) {
1898 fcoe_percpu_thread_destroy(cpu);
1899 }
1900
1901 return rc;
1902}
1903module_init(fcoe_init);
1904
1905/**
1906 * fcoe_exit() - fcoe module unloading cleanup
1907 *
1908 * Tears down all FCoE instances and per-CPU receive threads.
1909 */
1910static void __exit fcoe_exit(void)
1911{
1912 unsigned int cpu;
1913 struct fcoe_softc *fc, *tmp;
1914
1915 fcoe_dev_cleanup();
1916
1917 /* releases the associated fcoe hosts */
1918 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1919 fcoe_if_destroy(fc->real_dev);
1920
1921 unregister_hotcpu_notifier(&fcoe_cpu_notifier);
1922
1923 for_each_online_cpu(cpu) {
1924 fcoe_percpu_thread_destroy(cpu);
85b4aa49
RL
1925 }
1926
1927 /* detach from scsi transport */
1928 fcoe_if_exit();
1929}
1930module_exit(fcoe_exit);