]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/scsi/libfc/fc_lport.c
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[net-next-2.6.git] / drivers / scsi / libfc / fc_lport.c
CommitLineData
42e9a92f
RL
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent from
30 * having an lport reset just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
 35 * HIERARCHY
 36 *
 37 * The following hierarchy defines the locking rules. A greater lock
 38 * may be held before acquiring a lesser lock, but a lesser lock should never
 39 * be held while attempting to acquire a greater lock. Here is the hierarchy-
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not free'd while
53 * processing the callback the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be free'd while in a
 55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
af901ca1 59 * notification. Currently, successful discovery causes the lport to take no
42e9a92f
RL
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
63
64/*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking doesn't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lports state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks while
77 * the state machine can transition between states (i.e. _enter_* functions)
78 * while always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
 82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
89
90#include <linux/timer.h>
5a0e3ad6 91#include <linux/slab.h>
42e9a92f
RL
92#include <asm/unaligned.h>
93
94#include <scsi/fc/fc_gs.h>
95
96#include <scsi/libfc.h>
97#include <scsi/fc_encode.h>
a51ab396 98#include <linux/scatterlist.h>
42e9a92f 99
8866a5d9
RL
100#include "fc_libfc.h"
101
42e9a92f
RL
102/* Fabric IDs to use for point-to-point mode, chosen on whims. */
103#define FC_LOCAL_PTP_FID_LO 0x010101
104#define FC_LOCAL_PTP_FID_HI 0x010102
105
106#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
107
42e9a92f
RL
108static void fc_lport_error(struct fc_lport *, struct fc_frame *);
109
110static void fc_lport_enter_reset(struct fc_lport *);
111static void fc_lport_enter_flogi(struct fc_lport *);
112static void fc_lport_enter_dns(struct fc_lport *);
c914f7d1 113static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
42e9a92f
RL
114static void fc_lport_enter_scr(struct fc_lport *);
115static void fc_lport_enter_ready(struct fc_lport *);
116static void fc_lport_enter_logo(struct fc_lport *);
117
118static const char *fc_lport_state_names[] = {
b1d9fd55 119 [LPORT_ST_DISABLED] = "disabled",
42e9a92f
RL
120 [LPORT_ST_FLOGI] = "FLOGI",
121 [LPORT_ST_DNS] = "dNS",
c9c7bd7a 122 [LPORT_ST_RNN_ID] = "RNN_ID",
5baa17c3 123 [LPORT_ST_RSNN_NN] = "RSNN_NN",
c9866a54 124 [LPORT_ST_RSPN_ID] = "RSPN_ID",
42e9a92f 125 [LPORT_ST_RFT_ID] = "RFT_ID",
ab593b18 126 [LPORT_ST_RFF_ID] = "RFF_ID",
42e9a92f
RL
127 [LPORT_ST_SCR] = "SCR",
128 [LPORT_ST_READY] = "Ready",
129 [LPORT_ST_LOGO] = "LOGO",
130 [LPORT_ST_RESET] = "reset",
131};
132
a51ab396
SM
133/**
134 * struct fc_bsg_info - FC Passthrough managemet structure
135 * @job: The passthrough job
136 * @lport: The local port to pass through a command
137 * @rsp_code: The expected response code
3a3b42bf 138 * @sg: job->reply_payload.sg_list
a51ab396
SM
139 * @nents: job->reply_payload.sg_cnt
140 * @offset: The offset into the response data
141 */
142struct fc_bsg_info {
143 struct fc_bsg_job *job;
144 struct fc_lport *lport;
145 u16 rsp_code;
146 struct scatterlist *sg;
147 u32 nents;
148 size_t offset;
149};
150
3a3b42bf
RL
151/**
152 * fc_frame_drop() - Dummy frame handler
153 * @lport: The local port the frame was received on
154 * @fp: The received frame
155 */
42e9a92f
RL
156static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
157{
158 fc_frame_free(fp);
159 return 0;
160}
161
162/**
34f42a07 163 * fc_lport_rport_callback() - Event handler for rport events
42e9a92f 164 * @lport: The lport which is receiving the event
9fb9d328 165 * @rdata: private remote port data
42e9a92f
RL
166 * @event: The event that occured
167 *
168 * Locking Note: The rport lock should not be held when calling
169 * this function.
170 */
171static void fc_lport_rport_callback(struct fc_lport *lport,
9fb9d328 172 struct fc_rport_priv *rdata,
42e9a92f
RL
173 enum fc_rport_event event)
174{
7414705e 175 FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
f211fa51 176 rdata->ids.port_id);
42e9a92f 177
b5cbf083 178 mutex_lock(&lport->lp_mutex);
42e9a92f 179 switch (event) {
4c0f62b5 180 case RPORT_EV_READY:
b5cbf083 181 if (lport->state == LPORT_ST_DNS) {
3a3b42bf 182 lport->dns_rdata = rdata;
c914f7d1 183 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
b5cbf083
JE
184 } else {
185 FC_LPORT_DBG(lport, "Received an READY event "
186 "on port (%6x) for the directory "
187 "server, but the lport is not "
188 "in the DNS state, it's in the "
189 "%d state", rdata->ids.port_id,
190 lport->state);
191 lport->tt.rport_logoff(rdata);
192 }
42e9a92f
RL
193 break;
194 case RPORT_EV_LOGO:
195 case RPORT_EV_FAILED:
196 case RPORT_EV_STOP:
3a3b42bf 197 lport->dns_rdata = NULL;
42e9a92f
RL
198 break;
199 case RPORT_EV_NONE:
200 break;
201 }
b5cbf083 202 mutex_unlock(&lport->lp_mutex);
42e9a92f
RL
203}
204
205/**
34f42a07 206 * fc_lport_state() - Return a string which represents the lport's state
42e9a92f
RL
207 * @lport: The lport whose state is to converted to a string
208 */
209static const char *fc_lport_state(struct fc_lport *lport)
210{
211 const char *cp;
212
213 cp = fc_lport_state_names[lport->state];
214 if (!cp)
215 cp = "unknown";
216 return cp;
217}
218
219/**
34f42a07 220 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
3a3b42bf
RL
221 * @lport: The lport to attach the ptp rport to
222 * @remote_fid: The FID of the ptp rport
42e9a92f
RL
223 * @remote_wwpn: The WWPN of the ptp rport
224 * @remote_wwnn: The WWNN of the ptp rport
225 */
226static void fc_lport_ptp_setup(struct fc_lport *lport,
227 u32 remote_fid, u64 remote_wwpn,
228 u64 remote_wwnn)
229{
48f00902 230 mutex_lock(&lport->disc.disc_mutex);
3a3b42bf
RL
231 if (lport->ptp_rdata)
232 lport->tt.rport_logoff(lport->ptp_rdata);
233 lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
234 lport->ptp_rdata->ids.port_name = remote_wwpn;
235 lport->ptp_rdata->ids.node_name = remote_wwnn;
48f00902 236 mutex_unlock(&lport->disc.disc_mutex);
42e9a92f 237
3a3b42bf 238 lport->tt.rport_login(lport->ptp_rdata);
42e9a92f
RL
239
240 fc_lport_enter_ready(lport);
241}
242
3a3b42bf
RL
243/**
244 * fc_get_host_port_type() - Return the port type of the given Scsi_Host
245 * @shost: The SCSI host whose port type is to be determined
246 */
42e9a92f
RL
247void fc_get_host_port_type(struct Scsi_Host *shost)
248{
249 /* TODO - currently just NPORT */
250 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
251}
252EXPORT_SYMBOL(fc_get_host_port_type);
253
3a3b42bf
RL
254/**
255 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
256 * @shost: The SCSI host whose port state is to be determined
257 */
42e9a92f
RL
258void fc_get_host_port_state(struct Scsi_Host *shost)
259{
3a3b42bf 260 struct fc_lport *lport = shost_priv(shost);
42e9a92f 261
3a3b42bf
RL
262 mutex_lock(&lport->lp_mutex);
263 if (!lport->link_up)
8faecddb 264 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
42e9a92f 265 else
3a3b42bf 266 switch (lport->state) {
8faecddb
CL
267 case LPORT_ST_READY:
268 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
269 break;
270 default:
271 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
272 }
3a3b42bf 273 mutex_unlock(&lport->lp_mutex);
42e9a92f
RL
274}
275EXPORT_SYMBOL(fc_get_host_port_state);
276
3a3b42bf
RL
277/**
278 * fc_get_host_speed() - Return the speed of the given Scsi_Host
279 * @shost: The SCSI host whose port speed is to be determined
280 */
42e9a92f
RL
281void fc_get_host_speed(struct Scsi_Host *shost)
282{
283 struct fc_lport *lport = shost_priv(shost);
284
285 fc_host_speed(shost) = lport->link_speed;
286}
287EXPORT_SYMBOL(fc_get_host_speed);
288
3a3b42bf
RL
289/**
290 * fc_get_host_stats() - Return the Scsi_Host's statistics
291 * @shost: The SCSI host whose statistics are to be returned
292 */
42e9a92f
RL
293struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
294{
42e9a92f 295 struct fc_host_statistics *fcoe_stats;
3a3b42bf 296 struct fc_lport *lport = shost_priv(shost);
42e9a92f 297 struct timespec v0, v1;
582b45bc 298 unsigned int cpu;
42e9a92f 299
3a3b42bf 300 fcoe_stats = &lport->host_stats;
42e9a92f
RL
301 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
302
303 jiffies_to_timespec(jiffies, &v0);
3a3b42bf 304 jiffies_to_timespec(lport->boot_time, &v1);
42e9a92f
RL
305 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
306
582b45bc
RL
307 for_each_possible_cpu(cpu) {
308 struct fcoe_dev_stats *stats;
309
3a3b42bf 310 stats = per_cpu_ptr(lport->dev_stats, cpu);
582b45bc 311
42e9a92f
RL
312 fcoe_stats->tx_frames += stats->TxFrames;
313 fcoe_stats->tx_words += stats->TxWords;
314 fcoe_stats->rx_frames += stats->RxFrames;
315 fcoe_stats->rx_words += stats->RxWords;
316 fcoe_stats->error_frames += stats->ErrorFrames;
317 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
318 fcoe_stats->fcp_input_requests += stats->InputRequests;
319 fcoe_stats->fcp_output_requests += stats->OutputRequests;
320 fcoe_stats->fcp_control_requests += stats->ControlRequests;
321 fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
322 fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
323 fcoe_stats->link_failure_count += stats->LinkFailureCount;
324 }
325 fcoe_stats->lip_count = -1;
326 fcoe_stats->nos_count = -1;
327 fcoe_stats->loss_of_sync_count = -1;
328 fcoe_stats->loss_of_signal_count = -1;
329 fcoe_stats->prim_seq_protocol_err_count = -1;
330 fcoe_stats->dumped_frames = -1;
331 return fcoe_stats;
332}
333EXPORT_SYMBOL(fc_get_host_stats);
334
3a3b42bf
RL
335/**
336 * fc_lport_flogi_fill() - Fill in FLOGI command for request
337 * @lport: The local port the FLOGI is for
338 * @flogi: The FLOGI command
339 * @op: The opcode
42e9a92f 340 */
3a3b42bf
RL
341static void fc_lport_flogi_fill(struct fc_lport *lport,
342 struct fc_els_flogi *flogi,
343 unsigned int op)
42e9a92f
RL
344{
345 struct fc_els_csp *sp;
346 struct fc_els_cssp *cp;
347
348 memset(flogi, 0, sizeof(*flogi));
349 flogi->fl_cmd = (u8) op;
350 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
351 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
352 sp = &flogi->fl_csp;
353 sp->sp_hi_ver = 0x20;
354 sp->sp_lo_ver = 0x20;
355 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
356 sp->sp_bb_data = htons((u16) lport->mfs);
357 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
358 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
359 if (op != ELS_FLOGI) {
360 sp->sp_features = htons(FC_SP_FT_CIRO);
361 sp->sp_tot_seq = htons(255); /* seq. we accept */
362 sp->sp_rel_off = htons(0x1f);
363 sp->sp_e_d_tov = htonl(lport->e_d_tov);
364
365 cp->cp_rdfs = htons((u16) lport->mfs);
366 cp->cp_con_seq = htons(255);
367 cp->cp_open_seq = 1;
368 }
369}
370
3a3b42bf
RL
371/**
372 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
373 * @lport: The local port to add a new FC-4 type to
374 * @type: The new FC-4 type
42e9a92f
RL
375 */
376static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
377{
378 __be32 *mp;
379
380 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
381 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
382}
383
384/**
34f42a07 385 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
3a3b42bf
RL
386 * @sp: The sequence in the RLIR exchange
387 * @fp: The RLIR request frame
42e9a92f 388 * @lport: Fibre Channel local port recieving the RLIR
42e9a92f 389 *
1b69bc06 390 * Locking Note: The lport lock is expected to be held before calling
42e9a92f
RL
391 * this function.
392 */
393static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
394 struct fc_lport *lport)
395{
7414705e
RL
396 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
397 fc_lport_state(lport));
42e9a92f
RL
398
399 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
400 fc_frame_free(fp);
401}
402
403/**
34f42a07 404 * fc_lport_recv_echo_req() - Handle received ECHO request
3a3b42bf
RL
405 * @sp: The sequence in the ECHO exchange
406 * @fp: ECHO request frame
407 * @lport: The local port recieving the ECHO
42e9a92f 408 *
1b69bc06 409 * Locking Note: The lport lock is expected to be held before calling
42e9a92f
RL
410 * this function.
411 */
412static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
413 struct fc_lport *lport)
414{
415 struct fc_frame *fp;
416 struct fc_exch *ep = fc_seq_exch(sp);
417 unsigned int len;
418 void *pp;
419 void *dp;
420 u32 f_ctl;
421
1b69bc06 422 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
7414705e 423 fc_lport_state(lport));
42e9a92f
RL
424
425 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
426 pp = fc_frame_payload_get(in_fp, len);
427
428 if (len < sizeof(__be32))
429 len = sizeof(__be32);
430
431 fp = fc_frame_alloc(lport, len);
432 if (fp) {
433 dp = fc_frame_payload_get(fp, len);
434 memcpy(dp, pp, len);
1b69bc06 435 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
42e9a92f
RL
436 sp = lport->tt.seq_start_next(sp);
437 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
438 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
439 FC_TYPE_ELS, f_ctl, 0);
440 lport->tt.seq_send(lport, sp, fp);
441 }
442 fc_frame_free(in_fp);
443}
444
445/**
1b69bc06
JE
446 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
447 * @sp: The sequence in the RNID exchange
448 * @fp: The RNID request frame
449 * @lport: The local port recieving the RNID
42e9a92f 450 *
1b69bc06 451 * Locking Note: The lport lock is expected to be held before calling
42e9a92f
RL
452 * this function.
453 */
454static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
455 struct fc_lport *lport)
456{
457 struct fc_frame *fp;
458 struct fc_exch *ep = fc_seq_exch(sp);
459 struct fc_els_rnid *req;
460 struct {
461 struct fc_els_rnid_resp rnid;
462 struct fc_els_rnid_cid cid;
463 struct fc_els_rnid_gen gen;
464 } *rp;
465 struct fc_seq_els_data rjt_data;
466 u8 fmt;
467 size_t len;
468 u32 f_ctl;
469
7414705e
RL
470 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
471 fc_lport_state(lport));
42e9a92f
RL
472
473 req = fc_frame_payload_get(in_fp, sizeof(*req));
474 if (!req) {
475 rjt_data.fp = NULL;
476 rjt_data.reason = ELS_RJT_LOGIC;
477 rjt_data.explan = ELS_EXPL_NONE;
478 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
479 } else {
480 fmt = req->rnid_fmt;
481 len = sizeof(*rp);
482 if (fmt != ELS_RNIDF_GEN ||
483 ntohl(lport->rnid_gen.rnid_atype) == 0) {
484 fmt = ELS_RNIDF_NONE; /* nothing to provide */
485 len -= sizeof(rp->gen);
486 }
487 fp = fc_frame_alloc(lport, len);
488 if (fp) {
489 rp = fc_frame_payload_get(fp, len);
490 memset(rp, 0, len);
491 rp->rnid.rnid_cmd = ELS_LS_ACC;
492 rp->rnid.rnid_fmt = fmt;
493 rp->rnid.rnid_cid_len = sizeof(rp->cid);
494 rp->cid.rnid_wwpn = htonll(lport->wwpn);
495 rp->cid.rnid_wwnn = htonll(lport->wwnn);
496 if (fmt == ELS_RNIDF_GEN) {
497 rp->rnid.rnid_sid_len = sizeof(rp->gen);
498 memcpy(&rp->gen, &lport->rnid_gen,
499 sizeof(rp->gen));
500 }
501 sp = lport->tt.seq_start_next(sp);
502 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
503 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
504 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
505 FC_TYPE_ELS, f_ctl, 0);
506 lport->tt.seq_send(lport, sp, fp);
507 }
508 }
509 fc_frame_free(in_fp);
510}
511
42e9a92f 512/**
34f42a07 513 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
3a3b42bf
RL
514 * @sp: The sequence in the LOGO exchange
515 * @fp: The LOGO request frame
516 * @lport: The local port recieving the LOGO
42e9a92f
RL
517 *
518 * Locking Note: The lport lock is exected to be held before calling
519 * this function.
520 */
521static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
522 struct fc_lport *lport)
523{
524 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
525 fc_lport_enter_reset(lport);
526 fc_frame_free(fp);
527}
528
529/**
34f42a07 530 * fc_fabric_login() - Start the lport state machine
3a3b42bf 531 * @lport: The local port that should log into the fabric
42e9a92f
RL
532 *
533 * Locking Note: This function should not be called
534 * with the lport lock held.
535 */
536int fc_fabric_login(struct fc_lport *lport)
537{
538 int rc = -1;
539
540 mutex_lock(&lport->lp_mutex);
55a66d3c
VD
541 if (lport->state == LPORT_ST_DISABLED ||
542 lport->state == LPORT_ST_LOGO) {
543 fc_lport_state_enter(lport, LPORT_ST_RESET);
42e9a92f
RL
544 fc_lport_enter_reset(lport);
545 rc = 0;
546 }
547 mutex_unlock(&lport->lp_mutex);
548
549 return rc;
550}
551EXPORT_SYMBOL(fc_fabric_login);
552
553/**
8faecddb 554 * __fc_linkup() - Handler for transport linkup events
42e9a92f 555 * @lport: The lport whose link is up
8faecddb
CL
556 *
557 * Locking: must be called with the lp_mutex held
42e9a92f 558 */
8faecddb 559void __fc_linkup(struct fc_lport *lport)
42e9a92f 560{
bc0e17f6
VD
561 if (!lport->link_up) {
562 lport->link_up = 1;
42e9a92f
RL
563
564 if (lport->state == LPORT_ST_RESET)
565 fc_lport_enter_flogi(lport);
566 }
8faecddb
CL
567}
568
569/**
570 * fc_linkup() - Handler for transport linkup events
3a3b42bf 571 * @lport: The local port whose link is up
8faecddb
CL
572 */
573void fc_linkup(struct fc_lport *lport)
574{
e6d8a1b0
JE
575 printk(KERN_INFO "host%d: libfc: Link up on port (%6x)\n",
576 lport->host->host_no, fc_host_port_id(lport->host));
8faecddb
CL
577
578 mutex_lock(&lport->lp_mutex);
579 __fc_linkup(lport);
42e9a92f
RL
580 mutex_unlock(&lport->lp_mutex);
581}
582EXPORT_SYMBOL(fc_linkup);
583
584/**
8faecddb 585 * __fc_linkdown() - Handler for transport linkdown events
42e9a92f 586 * @lport: The lport whose link is down
8faecddb
CL
587 *
588 * Locking: must be called with the lp_mutex held
42e9a92f 589 */
8faecddb 590void __fc_linkdown(struct fc_lport *lport)
42e9a92f 591{
bc0e17f6
VD
592 if (lport->link_up) {
593 lport->link_up = 0;
42e9a92f
RL
594 fc_lport_enter_reset(lport);
595 lport->tt.fcp_cleanup(lport);
596 }
8faecddb
CL
597}
598
599/**
600 * fc_linkdown() - Handler for transport linkdown events
3a3b42bf 601 * @lport: The local port whose link is down
8faecddb
CL
602 */
603void fc_linkdown(struct fc_lport *lport)
604{
e6d8a1b0
JE
605 printk(KERN_INFO "host%d: libfc: Link down on port (%6x)\n",
606 lport->host->host_no, fc_host_port_id(lport->host));
8faecddb
CL
607
608 mutex_lock(&lport->lp_mutex);
609 __fc_linkdown(lport);
42e9a92f
RL
610 mutex_unlock(&lport->lp_mutex);
611}
612EXPORT_SYMBOL(fc_linkdown);
613
42e9a92f 614/**
34f42a07 615 * fc_fabric_logoff() - Logout of the fabric
3a3b42bf 616 * @lport: The local port to logoff the fabric
42e9a92f
RL
617 *
618 * Return value:
619 * 0 for success, -1 for failure
34f42a07 620 */
42e9a92f
RL
621int fc_fabric_logoff(struct fc_lport *lport)
622{
623 lport->tt.disc_stop_final(lport);
624 mutex_lock(&lport->lp_mutex);
3a3b42bf
RL
625 if (lport->dns_rdata)
626 lport->tt.rport_logoff(lport->dns_rdata);
a0fd2e49
AJ
627 mutex_unlock(&lport->lp_mutex);
628 lport->tt.rport_flush_queue();
629 mutex_lock(&lport->lp_mutex);
42e9a92f
RL
630 fc_lport_enter_logo(lport);
631 mutex_unlock(&lport->lp_mutex);
f7db2c15 632 cancel_delayed_work_sync(&lport->retry_work);
42e9a92f
RL
633 return 0;
634}
635EXPORT_SYMBOL(fc_fabric_logoff);
636
637/**
3a3b42bf
RL
638 * fc_lport_destroy() - Unregister a fc_lport
639 * @lport: The local port to unregister
42e9a92f 640 *
42e9a92f
RL
641 * Note:
642 * exit routine for fc_lport instance
643 * clean-up all the allocated memory
644 * and free up other system resources.
645 *
34f42a07 646 */
42e9a92f
RL
647int fc_lport_destroy(struct fc_lport *lport)
648{
bbf15669 649 mutex_lock(&lport->lp_mutex);
b1d9fd55 650 lport->state = LPORT_ST_DISABLED;
bbf15669 651 lport->link_up = 0;
42e9a92f 652 lport->tt.frame_send = fc_frame_drop;
bbf15669
AJ
653 mutex_unlock(&lport->lp_mutex);
654
42e9a92f 655 lport->tt.fcp_abort_io(lport);
e9ba8b42 656 lport->tt.disc_stop_final(lport);
1f6ff364 657 lport->tt.exch_mgr_reset(lport, 0, 0);
42e9a92f
RL
658 return 0;
659}
660EXPORT_SYMBOL(fc_lport_destroy);
661
662/**
3a3b42bf
RL
663 * fc_set_mfs() - Set the maximum frame size for a local port
664 * @lport: The local port to set the MFS for
665 * @mfs: The new MFS
34f42a07 666 */
42e9a92f
RL
667int fc_set_mfs(struct fc_lport *lport, u32 mfs)
668{
669 unsigned int old_mfs;
670 int rc = -EINVAL;
671
672 mutex_lock(&lport->lp_mutex);
673
674 old_mfs = lport->mfs;
675
676 if (mfs >= FC_MIN_MAX_FRAME) {
677 mfs &= ~3;
678 if (mfs > FC_MAX_FRAME)
679 mfs = FC_MAX_FRAME;
680 mfs -= sizeof(struct fc_frame_header);
681 lport->mfs = mfs;
682 rc = 0;
683 }
684
685 if (!rc && mfs < old_mfs)
686 fc_lport_enter_reset(lport);
687
688 mutex_unlock(&lport->lp_mutex);
689
690 return rc;
691}
692EXPORT_SYMBOL(fc_set_mfs);
693
694/**
34f42a07 695 * fc_lport_disc_callback() - Callback for discovery events
3a3b42bf 696 * @lport: The local port receiving the event
42e9a92f
RL
697 * @event: The discovery event
698 */
699void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
700{
701 switch (event) {
702 case DISC_EV_SUCCESS:
7414705e 703 FC_LPORT_DBG(lport, "Discovery succeeded\n");
42e9a92f
RL
704 break;
705 case DISC_EV_FAILED:
e6d8a1b0
JE
706 printk(KERN_ERR "host%d: libfc: "
707 "Discovery failed for port (%6x)\n",
708 lport->host->host_no, fc_host_port_id(lport->host));
42e9a92f
RL
709 mutex_lock(&lport->lp_mutex);
710 fc_lport_enter_reset(lport);
711 mutex_unlock(&lport->lp_mutex);
712 break;
713 case DISC_EV_NONE:
714 WARN_ON(1);
715 break;
716 }
717}
718
719/**
34f42a07 720 * fc_rport_enter_ready() - Enter the ready state and start discovery
3a3b42bf 721 * @lport: The local port that is ready
42e9a92f
RL
722 *
723 * Locking Note: The lport lock is expected to be held before calling
724 * this routine.
725 */
726static void fc_lport_enter_ready(struct fc_lport *lport)
727{
7414705e
RL
728 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
729 fc_lport_state(lport));
42e9a92f
RL
730
731 fc_lport_state_enter(lport, LPORT_ST_READY);
8faecddb
CL
732 if (lport->vport)
733 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
734 fc_vports_linkchange(lport);
42e9a92f 735
3a3b42bf 736 if (!lport->ptp_rdata)
29d898e9 737 lport->tt.disc_start(fc_lport_disc_callback, lport);
42e9a92f
RL
738}
739
093bb6a2
JE
740/**
741 * fc_lport_set_port_id() - set the local port Port ID
742 * @lport: The local port which will have its Port ID set.
743 * @port_id: The new port ID.
744 * @fp: The frame containing the incoming request, or NULL.
745 *
746 * Locking Note: The lport lock is expected to be held before calling
747 * this function.
748 */
749static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
750 struct fc_frame *fp)
751{
752 if (port_id)
753 printk(KERN_INFO "host%d: Assigned Port ID %6x\n",
754 lport->host->host_no, port_id);
755
756 fc_host_port_id(lport->host) = port_id;
757 if (lport->tt.lport_set_port_id)
758 lport->tt.lport_set_port_id(lport, port_id, fp);
759}
760
42e9a92f 761/**
34f42a07 762 * fc_lport_recv_flogi_req() - Receive a FLOGI request
42e9a92f 763 * @sp_in: The sequence the FLOGI is on
3a3b42bf
RL
764 * @rx_fp: The FLOGI frame
765 * @lport: The local port that recieved the request
42e9a92f
RL
766 *
767 * A received FLOGI request indicates a point-to-point connection.
768 * Accept it with the common service parameters indicating our N port.
769 * Set up to do a PLOGI if we have the higher-number WWPN.
770 *
1b69bc06 771 * Locking Note: The lport lock is expected to be held before calling
42e9a92f
RL
772 * this function.
773 */
774static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
775 struct fc_frame *rx_fp,
776 struct fc_lport *lport)
777{
778 struct fc_frame *fp;
779 struct fc_frame_header *fh;
780 struct fc_seq *sp;
781 struct fc_exch *ep;
782 struct fc_els_flogi *flp;
783 struct fc_els_flogi *new_flp;
784 u64 remote_wwpn;
785 u32 remote_fid;
786 u32 local_fid;
787 u32 f_ctl;
788
7414705e
RL
789 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
790 fc_lport_state(lport));
42e9a92f
RL
791
792 fh = fc_frame_header_get(rx_fp);
793 remote_fid = ntoh24(fh->fh_s_id);
794 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
795 if (!flp)
796 goto out;
797 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
798 if (remote_wwpn == lport->wwpn) {
e6d8a1b0
JE
799 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
800 "with same WWPN %llx\n",
801 lport->host->host_no, remote_wwpn);
42e9a92f
RL
802 goto out;
803 }
7414705e 804 FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
42e9a92f
RL
805
806 /*
807 * XXX what is the right thing to do for FIDs?
808 * The originator might expect our S_ID to be 0xfffffe.
809 * But if so, both of us could end up with the same FID.
810 */
811 local_fid = FC_LOCAL_PTP_FID_LO;
812 if (remote_wwpn < lport->wwpn) {
813 local_fid = FC_LOCAL_PTP_FID_HI;
814 if (!remote_fid || remote_fid == local_fid)
815 remote_fid = FC_LOCAL_PTP_FID_LO;
816 } else if (!remote_fid) {
817 remote_fid = FC_LOCAL_PTP_FID_HI;
818 }
819
093bb6a2 820 fc_lport_set_port_id(lport, local_fid, rx_fp);
42e9a92f
RL
821
822 fp = fc_frame_alloc(lport, sizeof(*flp));
823 if (fp) {
824 sp = lport->tt.seq_start_next(fr_seq(rx_fp));
825 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
826 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
827 new_flp->fl_cmd = (u8) ELS_LS_ACC;
828
829 /*
830 * Send the response. If this fails, the originator should
831 * repeat the sequence.
832 */
833 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
834 ep = fc_seq_exch(sp);
835 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
836 FC_TYPE_ELS, f_ctl, 0);
837 lport->tt.seq_send(lport, sp, fp);
838
839 } else {
840 fc_lport_error(lport, fp);
841 }
842 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
843 get_unaligned_be64(&flp->fl_wwnn));
844
42e9a92f
RL
845out:
846 sp = fr_seq(rx_fp);
847 fc_frame_free(rx_fp);
848}
849
850/**
34f42a07 851 * fc_lport_recv_req() - The generic lport request handler
3a3b42bf
RL
852 * @lport: The local port that received the request
853 * @sp: The sequence the request is on
854 * @fp: The request frame
42e9a92f
RL
855 *
856 * This function will see if the lport handles the request or
857 * if an rport should handle the request.
858 *
859 * Locking Note: This function should not be called with the lport
860 * lock held becuase it will grab the lock.
861 */
862static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
863 struct fc_frame *fp)
864{
865 struct fc_frame_header *fh = fc_frame_header_get(fp);
866 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
42e9a92f
RL
867
868 mutex_lock(&lport->lp_mutex);
869
870 /*
871 * Handle special ELS cases like FLOGI, LOGO, and
872 * RSCN here. These don't require a session.
873 * Even if we had a session, it might not be ready.
874 */
e9ba8b42
JE
875 if (!lport->link_up)
876 fc_frame_free(fp);
877 else if (fh->fh_type == FC_TYPE_ELS &&
878 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
42e9a92f
RL
879 /*
880 * Check opcode.
881 */
131203a1 882 recv = lport->tt.rport_recv_req;
42e9a92f
RL
883 switch (fc_frame_payload_op(fp)) {
884 case ELS_FLOGI:
885 recv = fc_lport_recv_flogi_req;
886 break;
887 case ELS_LOGO:
888 fh = fc_frame_header_get(fp);
889 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
890 recv = fc_lport_recv_logo_req;
891 break;
892 case ELS_RSCN:
893 recv = lport->tt.disc_recv_req;
894 break;
895 case ELS_ECHO:
896 recv = fc_lport_recv_echo_req;
897 break;
898 case ELS_RLIR:
899 recv = fc_lport_recv_rlir_req;
900 break;
901 case ELS_RNID:
902 recv = fc_lport_recv_rnid_req;
903 break;
42e9a92f
RL
904 }
905
131203a1 906 recv(sp, fp, lport);
42e9a92f 907 } else {
7414705e
RL
908 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
909 fr_eof(fp));
42e9a92f
RL
910 fc_frame_free(fp);
911 }
912 mutex_unlock(&lport->lp_mutex);
913
914 /*
915 * The common exch_done for all request may not be good
916 * if any request requires longer hold on exhange. XXX
917 */
918 lport->tt.exch_done(sp);
919}
920
921/**
3a3b42bf
RL
922 * fc_lport_reset() - Reset a local port
923 * @lport: The local port which should be reset
42e9a92f
RL
924 *
925 * Locking Note: This functions should not be called with the
926 * lport lock held.
927 */
928int fc_lport_reset(struct fc_lport *lport)
929{
f7db2c15 930 cancel_delayed_work_sync(&lport->retry_work);
42e9a92f
RL
931 mutex_lock(&lport->lp_mutex);
932 fc_lport_enter_reset(lport);
933 mutex_unlock(&lport->lp_mutex);
934 return 0;
935}
936EXPORT_SYMBOL(fc_lport_reset);
937
/**
 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
 * @lport: The local port to be reset
 *
 * Logs off the directory-server rport, stops discovery, resets all
 * exchanges and clears the fabric identity of the port.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_reset_locked(struct fc_lport *lport)
{
	/* Drop the session with the directory server, if we have one. */
	if (lport->dns_rdata)
		lport->tt.rport_logoff(lport->dns_rdata);

	lport->ptp_rdata = NULL;

	lport->tt.disc_stop(lport);

	/* (0, 0) means: reset exchanges for all SIDs and DIDs. */
	lport->tt.exch_mgr_reset(lport, 0, 0);
	fc_host_fabric_name(lport->host) = 0;

	/* Only clear the port ID if one was actually assigned. */
	if (fc_host_port_id(lport->host))
		fc_lport_set_port_id(lport, 0, NULL);
}
42e9a92f 960
1190d925
JE
/**
 * fc_lport_enter_reset() - Reset the local port
 * @lport: The local port to be reset
 *
 * Transitions the port into the RESET state, tears down its current
 * fabric context and, if the link is up, immediately restarts fabric
 * login (FLOGI).
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_reset(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
		     fc_lport_state(lport));

	/* A disabled port or one logging out must not be restarted. */
	if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
		return;

	/* Keep the FC transport's vport state in sync for NPIV ports. */
	if (lport->vport) {
		if (lport->link_up)
			fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
		else
			fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
	}
	fc_lport_state_enter(lport, LPORT_ST_RESET);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
	if (lport->link_up)
		fc_lport_enter_flogi(lport);
}
988
/**
 * fc_lport_enter_disabled() - Disable the local port
 * @lport: The local port to be reset
 *
 * Like fc_lport_enter_reset() but leaves the port in the DISABLED
 * state instead of retrying fabric login.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
}
1005
/**
 * fc_lport_error() - Handler for any errors
 * @lport: The local port that the error was on
 * @fp:	   The error code encoded in a frame pointer
 *
 * @fp is either NULL (allocation failure) or an ERR_PTR-encoded error
 * from the exchange layer; it is never a real frame here.
 *
 * If the error was caused by a resource allocation failure
 * then wait for half a second and retry, otherwise retry
 * after the e_d_tov time.
 */
static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
{
	unsigned long delay = 0;
	FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
		     PTR_ERR(fp), fc_lport_state(lport),
		     lport->retry_count);

	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
		/*
		 * Memory allocation failure, or the exchange timed out.
		 * Retry after delay
		 */
		if (lport->retry_count < lport->max_retry_count) {
			lport->retry_count++;
			/* short delay for OOM, e_d_tov for a timeout */
			if (!fp)
				delay = msecs_to_jiffies(500);
			else
				delay =	msecs_to_jiffies(lport->e_d_tov);

			schedule_delayed_work(&lport->retry_work, delay);
		} else {
			/* Retries exhausted: reset from any state. */
			switch (lport->state) {
			case LPORT_ST_DISABLED:
			case LPORT_ST_READY:
			case LPORT_ST_RESET:
			case LPORT_ST_RNN_ID:
			case LPORT_ST_RSNN_NN:
			case LPORT_ST_RSPN_ID:
			case LPORT_ST_RFT_ID:
			case LPORT_ST_RFF_ID:
			case LPORT_ST_SCR:
			case LPORT_ST_DNS:
			case LPORT_ST_FLOGI:
			case LPORT_ST_LOGO:
				fc_lport_enter_reset(lport);
				break;
			}
		}
	}
}
1055
/**
 * fc_lport_ns_resp() - Handle response to a name server
 *			registration exchange
 * @sp:	    current sequence in exchange
 * @fp:	    response frame (or an ERR_PTR-encoded error)
 * @lp_arg: Fibre Channel host port instance
 *
 * On an accepted (FC_FS_ACC) reply, advances the name-server
 * registration state machine RNN_ID -> RSNN_NN -> RSPN_ID -> RFT_ID ->
 * RFF_ID and finally on to SCR.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
			     void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	/* Only valid while in one of the name-server registration states. */
	if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
		FC_LPORT_DBG(lport, "Received a name server response, "
			     "but in state %s\n", fc_lport_state(lport));
		/* err skips fc_frame_free(): ERR_PTRs must not be freed */
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));

	/* Accept only a well-formed directory-service CT accept. */
	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_DIR &&
	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
	    ntohs(ct->ct_cmd) == FC_FS_ACC)
		switch (lport->state) {
		case LPORT_ST_RNN_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
			break;
		case LPORT_ST_RSNN_NN:
			fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
			break;
		case LPORT_ST_RSPN_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
			break;
		case LPORT_ST_RFT_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
			break;
		case LPORT_ST_RFF_ID:
			fc_lport_enter_scr(lport);
			break;
		default:
			/* should have already been caught by state checks */
			break;
		}
	else
		fc_lport_error(lport, fp);
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
1128
/**
 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
 * @sp:	    current sequence in SCR exchange
 * @fp:	    response frame (or an ERR_PTR-encoded error)
 * @lp_arg: Fibre Channel lport port instance that sent the registration request
 *
 * An LS_ACC moves the port to the READY state; anything else is
 * handed to fc_lport_error() for retry/reset handling.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_SCR) {
		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
			     "%s\n", fc_lport_state(lport));
		/* err skips fc_frame_free(): ERR_PTRs must not be freed */
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_ready(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
1176
/**
 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
 * @lport: The local port to register for state changes
 *
 * Registers with the fabric controller (FC_FID_FCTRL) so the port is
 * notified of fabric state changes (RSCNs).
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_scr(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_SCR);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
	if (!fp) {
		/* fp is NULL here, which fc_lport_error treats as OOM */
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
				  fc_lport_scr_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}
1204
/**
 * fc_lport_enter_ns() - register some object with the name server
 * @lport: Fibre Channel local port to register
 * @state: the name-server registration state to enter; selects which
 *	   FC_NS_* request is built and sent
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
	struct fc_frame *fp;
	enum fc_ns_req cmd;
	int size = sizeof(struct fc_ct_hdr);
	size_t len;

	FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
		     fc_lport_state_names[state],
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, state);

	/* Pick the NS command and payload size for the requested state. */
	switch (state) {
	case LPORT_ST_RNN_ID:
		cmd = FC_NS_RNN_ID;
		size += sizeof(struct fc_ns_rn_id);
		break;
	case LPORT_ST_RSNN_NN:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSNN_NN;
		size += sizeof(struct fc_ns_rsnn) + len;
		break;
	case LPORT_ST_RSPN_ID:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSPN_ID;
		size += sizeof(struct fc_ns_rspn) + len;
		break;
	case LPORT_ST_RFT_ID:
		cmd = FC_NS_RFT_ID;
		size += sizeof(struct fc_ns_rft);
		break;
	case LPORT_ST_RFF_ID:
		cmd = FC_NS_RFF_ID;
		size += sizeof(struct fc_ns_rff_id);
		break;
	default:
		/* caller passed a non-NS state; treat as an error */
		fc_lport_error(lport, NULL);
		return;
	}

	fp = fc_frame_alloc(lport, size);
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
				  fc_lport_ns_resp,
				  lport, 3 * lport->r_a_tov))
		fc_lport_error(lport, fp);
}
1270
42e9a92f
RL
/* rport event callbacks used for the lport's own rports (e.g. the dNS rport) */
static struct fc_rport_operations fc_lport_rport_ops = {
	.event_callback = fc_lport_rport_callback,
};
1274
/**
 * fc_lport_enter_dns() - Create a fc_rport for the name server
 * @lport: The local port requesting a remote port for the name server
 *
 * Creates the directory-server (dNS) rport and starts its login.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_dns(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;

	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DNS);

	/* disc_mutex guards rport creation; lport > disc in lock order. */
	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		goto err;

	rdata->ops = &fc_lport_rport_ops;
	lport->tt.rport_login(rdata);
	return;

err:
	fc_lport_error(lport, NULL);
}
1304
/**
 * fc_lport_timeout() - Handler for the retry_work timer
 * @work: The work struct of the local port
 *
 * Re-drives the state the port was in when fc_lport_error() scheduled
 * the retry.  DISABLED/READY should never have a retry pending.
 */
static void fc_lport_timeout(struct work_struct *work)
{
	struct fc_lport *lport =
		container_of(work, struct fc_lport,
			     retry_work.work);

	mutex_lock(&lport->lp_mutex);

	switch (lport->state) {
	case LPORT_ST_DISABLED:
		WARN_ON(1);
		break;
	case LPORT_ST_READY:
		WARN_ON(1);
		break;
	case LPORT_ST_RESET:
		break;
	case LPORT_ST_FLOGI:
		fc_lport_enter_flogi(lport);
		break;
	case LPORT_ST_DNS:
		fc_lport_enter_dns(lport);
		break;
	case LPORT_ST_RNN_ID:
	case LPORT_ST_RSNN_NN:
	case LPORT_ST_RSPN_ID:
	case LPORT_ST_RFT_ID:
	case LPORT_ST_RFF_ID:
		/* re-send the NS registration for the current state */
		fc_lport_enter_ns(lport, lport->state);
		break;
	case LPORT_ST_SCR:
		fc_lport_enter_scr(lport);
		break;
	case LPORT_ST_LOGO:
		fc_lport_enter_logo(lport);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
}
1349
/**
 * fc_lport_logo_resp() - Handle response to LOGO request
 * @sp:	    The sequence that the LOGO was on
 * @fp:	    The LOGO frame (or an ERR_PTR-encoded error)
 * @lp_arg: The lport port that received the LOGO request
 *
 * An LS_ACC completes the logout and leaves the port disabled.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_LOGO) {
		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
			     "%s\n", fc_lport_state(lport));
		/* err skips fc_frame_free(): ERR_PTRs must not be freed */
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_disabled(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_logo_resp);
42e9a92f
RL
1398
/**
 * fc_lport_enter_logo() - Logout of the fabric
 * @lport: The local port to be logged out
 *
 * Sends an ELS LOGO to the fabric login server (FC_FID_FLOGI).
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_logo(struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_els_logo *logo;

	FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_LOGO);
	fc_vports_linkchange(lport);

	fp = fc_frame_alloc(lport, sizeof(*logo));
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
				  fc_lport_logo_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}
1428
/**
 * fc_lport_flogi_resp() - Handle response to FLOGI request
 * @sp:	    The sequence that the FLOGI was on
 * @fp:	    The FLOGI response frame (or an ERR_PTR-encoded error)
 * @lp_arg: The lport port that received the FLOGI response
 *
 * Parses the common service parameters out of an LS_ACC, adopts the
 * fabric's MFS/E_D_TOV/R_A_TOV values, and either sets up
 * point-to-point mode (no F_Port present) or proceeds to dNS login.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
			 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	u32 did;
	u16 csp_flags;
	unsigned int r_a_tov;
	unsigned int e_d_tov;
	u16 mfs;

	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_FLOGI) {
		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
			     "%s\n", fc_lport_state(lport));
		/* err skips fc_frame_free(): ERR_PTRs must not be freed */
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	/* the D_ID of the accept is the FC_ID assigned to us */
	did = ntoh24(fh->fh_d_id);
	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
		flp = fc_frame_payload_get(fp, sizeof(*flp));
		if (flp) {
			/* never grow mfs beyond what we advertised */
			mfs = ntohs(flp->fl_csp.sp_bb_data) &
				FC_SP_BB_DATA_MASK;
			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
			    mfs < lport->mfs)
				lport->mfs = mfs;
			csp_flags = ntohs(flp->fl_csp.sp_features);
			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
			/* FC_SP_FT_EDTR means e_d_tov is in nanoseconds */
			if (csp_flags & FC_SP_FT_EDTR)
				e_d_tov /= 1000000;

			lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);

			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
				/* no F_Port: direct-attached peer (p2p) */
				if (e_d_tov > lport->e_d_tov)
					lport->e_d_tov = e_d_tov;
				lport->r_a_tov = 2 * e_d_tov;
				fc_lport_set_port_id(lport, did, fp);
				printk(KERN_INFO "host%d: libfc: "
				       "Port (%6x) entered "
				       "point-to-point mode\n",
				       lport->host->host_no, did);
				fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
						   get_unaligned_be64(
							   &flp->fl_wwpn),
						   get_unaligned_be64(
							   &flp->fl_wwnn));
			} else {
				/* fabric login: continue with dNS login */
				lport->e_d_tov = e_d_tov;
				lport->r_a_tov = r_a_tov;
				fc_host_fabric_name(lport->host) =
					get_unaligned_be64(&flp->fl_wwnn);
				fc_lport_set_port_id(lport, did, fp);
				fc_lport_enter_dns(lport);
			}
		}
	} else {
		FC_LPORT_DBG(lport, "Bad FLOGI response\n");
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_flogi_resp);
42e9a92f
RL
1522
/**
 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
 * @lport: Fibre Channel local port to be logged in to the fabric
 *
 * NPIV vports send FDISC instead of FLOGI and use a longer timeout.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
void fc_lport_enter_flogi(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_FLOGI);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp)
		return fc_lport_error(lport, fp);

	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
				  lport->vport ? ELS_FDISC : ELS_FLOGI,
				  fc_lport_flogi_resp, lport,
				  lport->vport ? 2 * lport->r_a_tov :
				  lport->e_d_tov))
		fc_lport_error(lport, NULL);
}
1550
3a3b42bf
RL
1551/**
1552 * fc_lport_config() - Configure a fc_lport
1553 * @lport: The local port to be configured
1554 */
42e9a92f
RL
1555int fc_lport_config(struct fc_lport *lport)
1556{
1557 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1558 mutex_init(&lport->lp_mutex);
1559
b1d9fd55 1560 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
42e9a92f
RL
1561
1562 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1563 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1564
1565 return 0;
1566}
1567EXPORT_SYMBOL(fc_lport_config);
1568
3a3b42bf
RL
1569/**
1570 * fc_lport_init() - Initialize the lport layer for a local port
1571 * @lport: The local port to initialize the exchange layer for
1572 */
42e9a92f
RL
1573int fc_lport_init(struct fc_lport *lport)
1574{
1575 if (!lport->tt.lport_recv)
1576 lport->tt.lport_recv = fc_lport_recv_req;
1577
1578 if (!lport->tt.lport_reset)
1579 lport->tt.lport_reset = fc_lport_reset;
1580
1581 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1582 fc_host_node_name(lport->host) = lport->wwnn;
1583 fc_host_port_name(lport->host) = lport->wwpn;
1584 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1585 memset(fc_host_supported_fc4s(lport->host), 0,
1586 sizeof(fc_host_supported_fc4s(lport->host)));
1587 fc_host_supported_fc4s(lport->host)[2] = 1;
1588 fc_host_supported_fc4s(lport->host)[7] = 1;
1589
1590 /* This value is also unchanging */
1591 memset(fc_host_active_fc4s(lport->host), 0,
1592 sizeof(fc_host_active_fc4s(lport->host)));
1593 fc_host_active_fc4s(lport->host)[2] = 1;
1594 fc_host_active_fc4s(lport->host)[7] = 1;
1595 fc_host_maxframe_size(lport->host) = lport->mfs;
1596 fc_host_supported_speeds(lport->host) = 0;
1597 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1598 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1599 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1600 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1601
1602 return 0;
1603}
1604EXPORT_SYMBOL(fc_lport_init);
a51ab396
SM
1605
/**
 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
 * @sp:	      The sequence for the FC Passthrough response
 * @fp:	      The response frame (or an ERR_PTR-encoded error)
 * @info_arg: The BSG info that the response is for
 *
 * May be called once per response frame of a multi-frame sequence; the
 * job is completed (and @info_arg freed) only on the final frame or on
 * error.
 */
static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *info_arg)
{
	struct fc_bsg_info *info = info_arg;
	struct fc_bsg_job *job = info->job;
	struct fc_lport *lport = info->lport;
	struct fc_frame_header *fh;
	size_t len;
	void *buf;

	if (IS_ERR(fp)) {
		/* fail the job and release the context */
		job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
			-ECONNABORTED : -ETIMEDOUT;
		job->reply_len = sizeof(uint32_t);
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
		return;
	}

	mutex_lock(&lport->lp_mutex);
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
		/* Get the response code from the first frame payload */
		unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
			ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
			(unsigned short)fc_frame_payload_op(fp);

		/* Save the reply status of the job */
		job->reply->reply_data.ctels_reply.status =
			(cmd == info->rsp_code) ?
			FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
	}

	/* copy this frame's payload into the job's reply scatterlist */
	job->reply->reply_payload_rcv_len +=
		fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
					 &info->offset, KM_BIO_SRC_IRQ, NULL);

	/* last frame of the last sequence: complete the job */
	if (fr_eof(fp) == FC_EOF_T &&
	    (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		if (job->reply->reply_payload_rcv_len >
		    job->reply_payload.payload_len)
			job->reply->reply_payload_rcv_len =
				job->reply_payload.payload_len;
		job->reply->result = 0;
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
	}
	fc_frame_free(fp);
	mutex_unlock(&lport->lp_mutex);
}
1668
1669/**
3a3b42bf
RL
1670 * fc_lport_els_request() - Send ELS passthrough request
1671 * @job: The BSG Passthrough job
a51ab396 1672 * @lport: The local port sending the request
3a3b42bf 1673 * @did: The destination port id
a51ab396
SM
1674 *
1675 * Locking Note: The lport lock is expected to be held before calling
1676 * this routine.
1677 */
1678static int fc_lport_els_request(struct fc_bsg_job *job,
1679 struct fc_lport *lport,
1680 u32 did, u32 tov)
1681{
1682 struct fc_bsg_info *info;
1683 struct fc_frame *fp;
1684 struct fc_frame_header *fh;
1685 char *pp;
1686 int len;
1687
70d919fb 1688 fp = fc_frame_alloc(lport, job->request_payload.payload_len);
a51ab396
SM
1689 if (!fp)
1690 return -ENOMEM;
1691
1692 len = job->request_payload.payload_len;
1693 pp = fc_frame_payload_get(fp, len);
1694
1695 sg_copy_to_buffer(job->request_payload.sg_list,
1696 job->request_payload.sg_cnt,
1697 pp, len);
1698
1699 fh = fc_frame_header_get(fp);
1700 fh->fh_r_ctl = FC_RCTL_ELS_REQ;
1701 hton24(fh->fh_d_id, did);
1702 hton24(fh->fh_s_id, fc_host_port_id(lport->host));
1703 fh->fh_type = FC_TYPE_ELS;
1704 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
1705 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
1706 fh->fh_cs_ctl = 0;
1707 fh->fh_df_ctl = 0;
1708 fh->fh_parm_offset = 0;
1709
1710 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1711 if (!info) {
1712 fc_frame_free(fp);
1713 return -ENOMEM;
1714 }
1715
1716 info->job = job;
1717 info->lport = lport;
1718 info->rsp_code = ELS_LS_ACC;
1719 info->nents = job->reply_payload.sg_cnt;
1720 info->sg = job->reply_payload.sg_list;
1721
1722 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1723 NULL, info, tov))
1724 return -ECOMM;
1725 return 0;
1726}
1727
1728/**
3a3b42bf
RL
1729 * fc_lport_ct_request() - Send CT Passthrough request
1730 * @job: The BSG Passthrough job
a51ab396
SM
1731 * @lport: The local port sending the request
1732 * @did: The destination FC-ID
3a3b42bf 1733 * @tov: The timeout period to wait for the response
a51ab396
SM
1734 *
1735 * Locking Note: The lport lock is expected to be held before calling
1736 * this routine.
1737 */
1738static int fc_lport_ct_request(struct fc_bsg_job *job,
1739 struct fc_lport *lport, u32 did, u32 tov)
1740{
1741 struct fc_bsg_info *info;
1742 struct fc_frame *fp;
1743 struct fc_frame_header *fh;
1744 struct fc_ct_req *ct;
1745 size_t len;
1746
1747 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1748 job->request_payload.payload_len);
1749 if (!fp)
1750 return -ENOMEM;
1751
1752 len = job->request_payload.payload_len;
1753 ct = fc_frame_payload_get(fp, len);
1754
1755 sg_copy_to_buffer(job->request_payload.sg_list,
1756 job->request_payload.sg_cnt,
1757 ct, len);
1758
1759 fh = fc_frame_header_get(fp);
1760 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
1761 hton24(fh->fh_d_id, did);
1762 hton24(fh->fh_s_id, fc_host_port_id(lport->host));
1763 fh->fh_type = FC_TYPE_CT;
1764 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
1765 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
1766 fh->fh_cs_ctl = 0;
1767 fh->fh_df_ctl = 0;
1768 fh->fh_parm_offset = 0;
1769
1770 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1771 if (!info) {
1772 fc_frame_free(fp);
1773 return -ENOMEM;
1774 }
1775
1776 info->job = job;
1777 info->lport = lport;
1778 info->rsp_code = FC_FS_ACC;
1779 info->nents = job->reply_payload.sg_cnt;
1780 info->sg = job->reply_payload.sg_list;
1781
1782 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
1783 NULL, info, tov))
1784 return -ECOMM;
1785 return 0;
1786}
1787
/**
 * fc_lport_bsg_request() - The common entry point for sending
 *			    FC Passthrough requests
 * @job: The BSG passthrough job
 *
 * Dispatches ELS/CT passthrough requests to a known rport or, for
 * host-directed requests, to an explicitly addressed destination ID.
 *
 * Return: 0 on success, -EINVAL for an unknown message code or missing
 * rport, or the error from the request builder.
 */
int fc_lport_bsg_request(struct fc_bsg_job *job)
{
	struct request *rsp = job->req->next_rq;
	struct Scsi_Host *shost = job->shost;
	struct fc_lport *lport = shost_priv(shost);
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	int rc = -EINVAL;
	u32 did;

	job->reply->reply_payload_rcv_len = 0;
	if (rsp)
		rsp->resid_len = job->reply_payload.payload_len;

	mutex_lock(&lport->lp_mutex);

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:
		/* ELS to an already logged-in rport */
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_els_request(job, lport, rport->port_id,
					  rdata->e_d_tov);
		break;

	case FC_BSG_RPT_CT:
		/* CT to an already logged-in rport */
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_ct_request(job, lport, rport->port_id,
					 rdata->e_d_tov);
		break;

	case FC_BSG_HST_CT:
		/* CT addressed by destination ID; dNS is cached */
		did = ntoh24(job->request->rqst_data.h_ct.port_id);
		if (did == FC_FID_DIR_SERV)
			rdata = lport->dns_rdata;
		else
			rdata = lport->tt.rport_lookup(lport, did);

		if (!rdata)
			break;

		rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
		break;

	case FC_BSG_HST_ELS_NOLOGIN:
		/* ELS without a prior login; uses the lport's own e_d_tov */
		did = ntoh24(job->request->rqst_data.h_els.port_id);
		rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
	return rc;
}
EXPORT_SYMBOL(fc_lport_bsg_request);