]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/scsi/bfa/bfa_fcpim.c
Merge branch 'upstream/core' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen
[net-next-2.6.git] / drivers / scsi / bfa / bfa_fcpim.c
CommitLineData
7725ccfd 1/*
a36c61f9 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
7725ccfd
JH
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
a36c61f9
KG
18#include "bfa_modules.h"
19#include "bfa_cb_ioim.h"
7725ccfd
JH
20
21BFA_TRC_FILE(HAL, FCPIM);
22BFA_MODULE(fcpim);
23
a36c61f9
KG
24
/* Accumulate one named IO statistics counter from __r into __l. */
#define bfa_fcpim_add_iostats(__l, __r, __stats)	\
	(__l->__stats += __r->__stats)
27
28
5fbe25c7 29/*
a36c61f9
KG
30 * BFA ITNIM Related definitions
31 */
/* Folds a dying itnim's IO statistics into module-wide accumulators. */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);

/* Map a firmware itnim tag to its slot in the itnim array; the mask
 * assumes num_itnims is a power of two - TODO confirm at init site. */
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)				\
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

/* Append an itnim to its module's active itnim queue. */
#define bfa_fcpim_additn(__itnim)					\
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
/* Unlink an itnim from the module queue; stats are captured first and
 * all of its IO queues are asserted empty (drained) at removal time. */
#define bfa_fcpim_delitn(__itnim)	do {				\
	bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));	\
	bfa_itnim_update_del_itn_stats(__itnim);			\
	list_del(&(__itnim)->qe);					\
	bfa_assert(list_empty(&(__itnim)->io_q));			\
	bfa_assert(list_empty(&(__itnim)->io_cleanup_q));		\
	bfa_assert(list_empty(&(__itnim)->pending_q));			\
} while (0)
47
/* Deliver the itnim-online notification: direct call when an FCS
 * context is attached, otherwise deferred via the bfa callback queue. */
#define bfa_itnim_online_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_online((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_online, (__itnim));			\
	}								\
} while (0)

/* Deliver the itnim-offline notification; same direct/deferred split. */
#define bfa_itnim_offline_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_offline((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_offline, (__itnim));			\
	}								\
} while (0)

/* Deliver the second-level-error-recovery notification; same split. */
#define bfa_itnim_sler_cb(__itnim) do {					\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_sler((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_sler, (__itnim));			\
	}								\
} while (0)
74
5fbe25c7 75/*
a36c61f9
KG
76 * bfa_itnim_sm BFA itnim state machine
77 */
78
79
/* Events posted to the BFA itnim state machine (bfa_itnim_sm_*). */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};
91
5fbe25c7 92/*
a36c61f9
KG
93 * BFA IOIM related definitions
94 */
/* Re-queue a completed IO onto the module's IO completion queue. */
#define bfa_ioim_move_to_comp_q(__ioim) do {				\
	list_del(&(__ioim)->qe);					\
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);	\
} while (0)


/* Invoke the IO-profiling completion hook, if profiling is enabled. */
#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_comp)					\
		(__fcpim)->profile_comp(__ioim);			\
} while (0)

/* Invoke the IO-profiling start hook, if profiling is enabled. */
#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_start)					\
		(__fcpim)->profile_start(__ioim);			\
} while (0)
5fbe25c7 110/*
a36c61f9
KG
111 * hal_ioim_sm
112 */
113
5fbe25c7 114/*
a36c61f9
KG
115 * IO state machine events
116 */
/* Events posted to the BFA IO state machine (bfa_ioim_sm_*). */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};
137
138
5fbe25c7 139/*
a36c61f9
KG
140 * BFA TSKIM related definitions
141 */
142
5fbe25c7 143/*
a36c61f9
KG
144 * task management completion handling
145 */
/* Queue the TM completion callback and notify the owning itnim. */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
	bfa_tskim_notify_comp(__tskim);					\
} while (0)

/* Tell the itnim a TM command finished, when notification was requested. */
#define bfa_tskim_notify_comp(__tskim) do {				\
	if ((__tskim)->notify)						\
		bfa_itnim_tskdone((__tskim)->itnim);			\
} while (0)


/* Events posted to the BFA task-management state machine.
 * NOTE(review): value 4 is intentionally skipped in this numbering -
 * presumably a removed event; preserved as-is. */
enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
};
167
5fbe25c7 168/*
a36c61f9
KG
169 * forward declaration for BFA ITNIM functions
170 */
171static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
172static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
173static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
174static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
175static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
176static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
177static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
178static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
179static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
180static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
181static void bfa_itnim_iotov(void *itnim_arg);
182static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
183static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
184static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
185
5fbe25c7 186/*
a36c61f9
KG
187 * forward declaration of ITNIM state machine
188 */
189static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
190 enum bfa_itnim_event event);
191static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
192 enum bfa_itnim_event event);
193static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
194 enum bfa_itnim_event event);
195static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
196 enum bfa_itnim_event event);
197static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
198 enum bfa_itnim_event event);
199static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
200 enum bfa_itnim_event event);
201static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
202 enum bfa_itnim_event event);
203static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
204 enum bfa_itnim_event event);
205static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
206 enum bfa_itnim_event event);
207static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
208 enum bfa_itnim_event event);
209static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
210 enum bfa_itnim_event event);
211static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
212 enum bfa_itnim_event event);
213static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
214 enum bfa_itnim_event event);
215static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
216 enum bfa_itnim_event event);
217static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
218 enum bfa_itnim_event event);
219
5fbe25c7 220/*
a36c61f9
KG
221 * forward declaration for BFA IOIM functions
222 */
223static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
224static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
225static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
226static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
227static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
228static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
229static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
230static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
231static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
232static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
233static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
234
235
5fbe25c7 236/*
a36c61f9
KG
237 * forward declaration of BFA IO state machine
238 */
239static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
240 enum bfa_ioim_event event);
241static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
242 enum bfa_ioim_event event);
243static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
244 enum bfa_ioim_event event);
245static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
246 enum bfa_ioim_event event);
247static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
248 enum bfa_ioim_event event);
249static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
250 enum bfa_ioim_event event);
251static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
252 enum bfa_ioim_event event);
253static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
254 enum bfa_ioim_event event);
255static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
256 enum bfa_ioim_event event);
257static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
258 enum bfa_ioim_event event);
259static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
260 enum bfa_ioim_event event);
261static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
262 enum bfa_ioim_event event);
263
5fbe25c7 264/*
a36c61f9
KG
265 * forward declaration for BFA TSKIM functions
266 */
267static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
268static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
269static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
270 lun_t lun);
271static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
272static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
273static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
274static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
275static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
276static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
277
278
5fbe25c7 279/*
a36c61f9
KG
280 * forward declaration of BFA TSKIM state machine
281 */
282static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
283 enum bfa_tskim_event event);
284static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
285 enum bfa_tskim_event event);
286static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
287 enum bfa_tskim_event event);
288static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
289 enum bfa_tskim_event event);
290static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
291 enum bfa_tskim_event event);
292static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
293 enum bfa_tskim_event event);
294static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
295 enum bfa_tskim_event event);
296
5fbe25c7 297/*
7725ccfd
JH
298 * hal_fcpim_mod BFA FCP Initiator Mode module
299 */
300
5fbe25c7 301/*
a36c61f9 302 * Compute and return memory needed by FCP(im) module.
7725ccfd
JH
303 */
304static void
305bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
306 u32 *dm_len)
307{
308 bfa_itnim_meminfo(cfg, km_len, dm_len);
309
5fbe25c7 310 /*
7725ccfd
JH
311 * IO memory
312 */
313 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
314 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
315 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
316 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
317
318 *km_len += cfg->fwcfg.num_ioim_reqs *
319 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
320
321 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
322
5fbe25c7 323 /*
7725ccfd
JH
324 * task management command memory
325 */
326 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
327 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
328 *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
329}
330
331
/*
 * Attach the FCP(im) module: record the firmware/driver configuration
 * limits in the module structure and attach the itnim, tskim and ioim
 * sub-modules using @meminfo.
 */
static void
bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	/* trace the configured limits for post-mortem debugging */
	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->bfa = bfa;
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;
	/* IO profiling hooks start disabled; see bfa_fcpim_profile_on() */
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	bfa_itnim_attach(fcpim, meminfo);
	bfa_tskim_attach(fcpim, meminfo);
	bfa_ioim_attach(fcpim, meminfo);
}
356
7725ccfd
JH
/*
 * Tear down the FCP(im) module by detaching the IO and task-management
 * sub-modules. Note: no itnim detach call is made here.
 */
static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *mod = BFA_FCPIM_MOD(bfa);

	bfa_ioim_detach(mod);
	bfa_tskim_detach(mod);
}
365
/* Module start hook - nothing to do for FCP(im). */
static void
bfa_fcpim_start(struct bfa_s *bfa)
{
}
370
/* Module stop hook - nothing to do for FCP(im). */
static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
}
375
376static void
377bfa_fcpim_iocdisable(struct bfa_s *bfa)
378{
379 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
380 struct bfa_itnim_s *itnim;
a36c61f9 381 struct list_head *qe, *qen;
7725ccfd
JH
382
383 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
384 itnim = (struct bfa_itnim_s *) qe;
385 bfa_itnim_iocdisable(itnim);
386 }
387}
388
a36c61f9
KG
/*
 * Add every IO statistics counter of @rstats into @lstats, field by
 * field, via the bfa_fcpim_add_iostats() accumulation macro.
 */
void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
		struct bfa_itnim_iostats_s *rstats)
{
	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
	bfa_fcpim_add_iostats(lstats, rstats, onlines);
	bfa_fcpim_add_iostats(lstats, rstats, offlines);
	bfa_fcpim_add_iostats(lstats, rstats, creates);
	bfa_fcpim_add_iostats(lstats, rstats, deletes);
	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}
438
7725ccfd
JH
439void
440bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
441{
442 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
443
444 fcpim->path_tov = path_tov * 1000;
445 if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
446 fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
447}
448
449u16
450bfa_fcpim_path_tov_get(struct bfa_s *bfa)
451{
452 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
453
f8ceafde 454 return fcpim->path_tov / 1000;
7725ccfd
JH
455}
456
457bfa_status_t
a36c61f9
KG
458bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
459 u8 lp_tag)
460{
461 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
462 struct list_head *qe, *qen;
463 struct bfa_itnim_s *itnim;
464
465 /* accumulate IO stats from itnim */
6a18b167 466 memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
a36c61f9
KG
467 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
468 itnim = (struct bfa_itnim_s *) qe;
469 if (itnim->rport->rport_info.lp_tag != lp_tag)
470 continue;
471 bfa_fcpim_add_stats(stats, &(itnim->stats));
472 }
473 return BFA_STATUS_OK;
474}
475bfa_status_t
476bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
477{
478 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
479 struct list_head *qe, *qen;
480 struct bfa_itnim_s *itnim;
481
482 /* accumulate IO stats from itnim */
6a18b167 483 memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
a36c61f9
KG
484 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
485 itnim = (struct bfa_itnim_s *) qe;
486 bfa_fcpim_add_stats(modstats, &(itnim->stats));
487 }
488 return BFA_STATUS_OK;
489}
490
491bfa_status_t
492bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
493 struct bfa_fcpim_del_itn_stats_s *modstats)
494{
495 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
496
497 *modstats = fcpim->del_itn_stats;
498
499 return BFA_STATUS_OK;
500}
501
502
503bfa_status_t
504bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
505{
506 struct bfa_itnim_s *itnim;
507 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
508 struct list_head *qe, *qen;
509
510 /* accumulate IO stats from itnim */
511 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
512 itnim = (struct bfa_itnim_s *) qe;
513 bfa_itnim_clear_stats(itnim);
514 }
515 fcpim->io_profile = BFA_TRUE;
516 fcpim->io_profile_start_time = time;
517 fcpim->profile_comp = bfa_ioim_profile_comp;
518 fcpim->profile_start = bfa_ioim_profile_start;
519
520 return BFA_STATUS_OK;
521}
522bfa_status_t
523bfa_fcpim_profile_off(struct bfa_s *bfa)
7725ccfd
JH
524{
525 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
a36c61f9
KG
526 fcpim->io_profile = BFA_FALSE;
527 fcpim->io_profile_start_time = 0;
528 fcpim->profile_comp = NULL;
529 fcpim->profile_start = NULL;
530 return BFA_STATUS_OK;
531}
7725ccfd 532
a36c61f9
KG
533bfa_status_t
534bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
535{
536 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
537 struct list_head *qe, *qen;
538 struct bfa_itnim_s *itnim;
7725ccfd 539
a36c61f9
KG
540 /* clear IO stats from all active itnims */
541 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
542 itnim = (struct bfa_itnim_s *) qe;
543 if (itnim->rport->rport_info.lp_tag != lp_tag)
544 continue;
545 bfa_itnim_clear_stats(itnim);
546 }
7725ccfd 547 return BFA_STATUS_OK;
a36c61f9 548
7725ccfd
JH
549}
550
551bfa_status_t
552bfa_fcpim_clr_modstats(struct bfa_s *bfa)
553{
554 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
a36c61f9
KG
555 struct list_head *qe, *qen;
556 struct bfa_itnim_s *itnim;
7725ccfd 557
a36c61f9
KG
558 /* clear IO stats from all active itnims */
559 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
560 itnim = (struct bfa_itnim_s *) qe;
561 bfa_itnim_clear_stats(itnim);
562 }
6a18b167 563 memset(&fcpim->del_itn_stats, 0,
a36c61f9 564 sizeof(struct bfa_fcpim_del_itn_stats_s));
7725ccfd
JH
565
566 return BFA_STATUS_OK;
567}
568
569void
570bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth)
571{
572 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
573
574 bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX);
575
576 fcpim->q_depth = q_depth;
577}
578
579u16
580bfa_fcpim_qdepth_get(struct bfa_s *bfa)
581{
582 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
583
f8ceafde 584 return fcpim->q_depth;
7725ccfd
JH
585}
586
36d345a7
JH
/*
 * Recompute the IO-redirection setting from the current QoS state.
 */
void
bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
{
	bfa_boolean_t ioredirect;

	/*
	 * IO redirection is turned off when QoS is enabled and vice versa
	 */
	ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
	/*
	 * NOTE(review): the computed value is never stored or used - as
	 * written this function has no effect beyond the query above.
	 * Either fcpim->ioredirect should be updated here (compare
	 * bfa_fcpim_set_ioredirect()) or the function is dead code -
	 * confirm intent before fixing.
	 */
}
597
598void
599bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
600{
601 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
602 fcpim->ioredirect = state;
603}
a36c61f9
KG
604
605
606
5fbe25c7 607/*
a36c61f9
KG
608 * BFA ITNIM module state machine functions
609 */
610
5fbe25c7 611/*
a36c61f9
KG
612 * Beginning/unallocated state - no events expected.
613 */
/*
 * Uninitialized/unallocated state - only CREATE is legal.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		/* newly created itnims start offline, queued on the module */
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
631
5fbe25c7 632/*
a36c61f9
KG
633 * Beginning state, only online event expected.
634 */
/*
 * Created state - waiting to be brought online (or deleted).
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		/* request f/w create; park in the qfull state if the
		 * request queue had no space */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* never went online: unlink immediately */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
662
5fbe25c7 663/*
a36c61f9
KG
664 * Waiting for itnim create response from firmware.
665 */
/*
 * Waiting for the firmware itnim-create response.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* create done: state is set before the online callback
		 * so the callback observes the online state */
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* must wait for the create response before deleting */
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		/* went offline before the create completed: issue delete */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
699
/*
 * Firmware create pending on request-queue space.
 */
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* queue space is back: retry the f/w create */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* nothing reached f/w yet: cancel the wait and unlink */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
734
5fbe25c7 735/*
a36c61f9
KG
736 * Waiting for itnim create response from firmware, a delete is pending.
737 */
/*
 * Waiting for the firmware create response while a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* create finally completed: immediately issue the delete */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* h/w gone: no f/w interaction possible, unlink now */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
762
5fbe25c7 763/*
a36c61f9
KG
764 * Online state - normal parking state.
765 */
/*
 * Online state - normal parking state; every exit path clears
 * is_online before starting cleanup.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* delete path does not start the IO TOV timer */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* second-level error recovery requested */
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
804
5fbe25c7 805/*
a36c61f9
KG
806 * Second level error recovery need.
807 */
/*
 * Second-level error recovery in progress.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* delete cancels the pending IO TOV timer */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
835
5fbe25c7 836/*
a36c61f9
KG
837 * Going offline. Waiting for active IO cleanup.
838 */
/*
 * Going offline - waiting for active IO cleanup to complete.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IOs drained: now tear down the firmware itnim */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* escalate the offline into a delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* already cleaning up; SLER is a no-op here */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
872
5fbe25c7 873/*
a36c61f9
KG
874 * Deleting itnim. Waiting for active IO cleanup.
875 */
/*
 * Deleting the itnim - waiting for active IO cleanup to complete.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IOs drained: issue the firmware delete */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
900
5fbe25c7 901/*
a36c61f9
KG
902 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
903 */
/*
 * Rport offline - firmware itnim delete issued, awaiting the response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* same f/w delete now satisfies the itnim delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
929
/*
 * Firmware delete pending on request-queue space.
 */
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* queue space is back: retry the f/w delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* keep waiting for queue space, but as a delete */
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
957
5fbe25c7 958/*
a36c61f9
KG
959 * Offline state.
960 */
/*
 * Offline state - can be deleted or brought back online.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		/* re-create the firmware itnim */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
989
5fbe25c7 990/*
a36c61f9
KG
991 * IOC h/w failed state.
992 */
/*
 * IOC h/w failed state - firmware is unreachable.
 */
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		/* no f/w interaction possible: unlink directly */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		/* notify only; state is unchanged */
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		/* IOC recovered: attempt a fresh firmware create */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* already in the failed state; ignore */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1025
/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		/* Either fw confirmed the delete or fw died: free itnim. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1046
/*
 * Itnim is deleted and the fw delete request is waiting for request
 * queue space.
 */
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		 enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* Fw is gone: cancel the queue wait and free the itnim. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1070
/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	/* Task management commands first. */
	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	/* Active IOs. */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	/* IOs already moved to the cleanup queue. */
	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
1104
/*
 * IO cleanup completion: fired when the cleanup wait-counter drains.
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
1116
/*
 * Initiate cleanup of all IOs.  Each outstanding IO/TM bumps the wait
 * counter; bfa_itnim_cleanp_comp() runs once all of them complete.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pickup this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	/* Drop the initial reference; may complete immediately if idle. */
	bfa_wc_wait(&itnim->wc);
}
1151
1152static void
1153__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
1154{
1155 struct bfa_itnim_s *itnim = cbarg;
1156
1157 if (complete)
1158 bfa_cb_itnim_online(itnim->ditn);
1159}
1160
1161static void
1162__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
1163{
1164 struct bfa_itnim_s *itnim = cbarg;
1165
1166 if (complete)
1167 bfa_cb_itnim_offline(itnim->ditn);
1168}
1169
1170static void
1171__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
1172{
1173 struct bfa_itnim_s *itnim = cbarg;
1174
1175 if (complete)
1176 bfa_cb_itnim_sler(itnim->ditn);
1177}
1178
/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}
1189
1190
1191
1192
/*
 * bfa_itnim_public
 */
1196
/* An IO belonging to this itnim completed cleanup; drain the counter. */
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
1202
/* A TM command belonging to this itnim completed cleanup; drain counter. */
void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
1208
/*
 * Account the kernel-virtual memory needed for itnim structures
 * (one per rport) in the module memory claim.
 */
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
1218
/*
 * Carve the itnim array out of the claimed memory and initialize
 * every entry to the uninit state.
 */
void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		/* itnim index i maps 1:1 onto the rport tag. */
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		/* Latency minimums start at max so first sample wins. */
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	/* Advance the carve-out pointer past the consumed array. */
	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}
1252
/* Notify the itnim state machine that the IOC has failed. */
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
1259
/*
 * Send an itnim create request to firmware.  Returns BFA_FALSE and
 * registers a queue-resume wait if the request queue is full.
 */
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1290
/*
 * Send an itnim delete request to firmware.  Returns BFA_FALSE and
 * registers a queue-resume wait if the request queue is full.
 */
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1316
/*
 * Cleanup all pending failed inflight requests.
 * @iotov selects whether the IOs complete with a path-TOV status.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}
1331
/*
 * Itnim came back online within path TOV: stop the timer and start
 * all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}
1356
/*
 * Path TOV expired (or itnim is going away): fail all pending IO
 * requests back to the initiator.
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}
1379
/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	/* Timer has fired; mark it inactive before running callbacks. */
	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}
1394
/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 * Only armed when a non-zero path TOV is configured.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		/* IOs must be held while the timer runs. */
		bfa_assert(bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}
1409
5fbe25c7 1410/*
a36c61f9
KG
1411 * Stop IO TOV timer.
1412 */
1413static void
1414bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
1415{
1416 if (itnim->iotov_active) {
1417 itnim->iotov_active = BFA_FALSE;
1418 bfa_timer_stop(&itnim->timer);
1419 }
1420}
1421
/*
 * Stop the IO TOV timer and fail back any held IOs as part of itnim
 * deletion.  TOV begin/end callbacks fire only if the timer was armed.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
	bfa_boolean_t pathtov_active = BFA_FALSE;

	/* Remember whether the timer was running before stopping it. */
	if (itnim->iotov_active)
		pathtov_active = BFA_TRUE;

	bfa_itnim_iotov_stop(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
}
1440
1441static void
1442bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
1443{
1444 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
1445 fcpim->del_itn_stats.del_itn_iocomp_aborted +=
1446 itnim->stats.iocomp_aborted;
1447 fcpim->del_itn_stats.del_itn_iocomp_timedout +=
1448 itnim->stats.iocomp_timedout;
1449 fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
1450 itnim->stats.iocom_sqer_needed;
1451 fcpim->del_itn_stats.del_itn_iocom_res_free +=
1452 itnim->stats.iocom_res_free;
1453 fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
1454 itnim->stats.iocom_hostabrts;
1455 fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
1456 fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
1457 fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
1458}
1459
1460
1461
/*
 * bfa_itnim_public
 */
1465
/*
 * Itnim interrupt processing: dispatch firmware responses/events to
 * the state machine of the itnim identified by the message's handle.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		/* Unknown message id from firmware. */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
1509
1510
1511
/*
 * bfa_itnim_api
 */
1515
/*
 * Create (claim) the itnim associated with @rport and bind the driver
 * cookie @ditn to it.  The itnim storage itself was pre-allocated in
 * bfa_itnim_attach(), indexed by rport tag.
 */
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	bfa_assert(itnim->rport == rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}
1532
/* Request deletion of an itnim; actual teardown is driven by the SM. */
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}
1539
/*
 * Bring the itnim online.  @seq_rec enables sequence-record support
 * in the firmware create request.
 */
void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}
1547
/* Take the itnim offline; cleanup is driven by the state machine. */
void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
1554
/*
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	/*
	 * Hold IOs only while path TOV is configured and the timer is
	 * active, and the itnim is in a transitional or offline state.
	 */
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
1570
/*
 * Snapshot the itnim's IO latency profile into @ioprofile.
 * Returns BFA_STATUS_IOPROFILE_OFF when profiling is not enabled.
 */
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
		struct bfa_itnim_ioprofile_s *ioprofile)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	if (!fcpim->io_profile)
		return BFA_STATUS_IOPROFILE_OFF;

	/* Fill in header fields before the struct copy-out. */
	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
	itnim->ioprofile.io_profile_start_time =
		bfa_io_profile_start_time(itnim->bfa);
	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
	*ioprofile = itnim->ioprofile;

	return BFA_STATUS_OK;
}
1588
/* Copy out a snapshot of the itnim's IO statistics. */
void
bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
		struct bfa_itnim_iostats_s *stats)
{
	*stats = itnim->stats;
}
1595
/* Reset IO statistics and the latency profile for this itnim. */
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;
	memset(&itnim->stats, 0, sizeof(itnim->stats));
	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	/* Re-seed latency minimums to max so the next sample wins. */
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}
1605
/*
 * BFA IO module state machine functions
 */
1609
/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				/* Target offline, no hold: fail with pathtov. */
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				/* Hold the IO until TOV or target online. */
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		/* Large SG lists may need SG pages allocated first. */
		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1674
/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		/* SG pages available; try to issue the IO request. */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1721
/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* Completed, but fw still owns the IO tag resource. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* Host-initiated (explicit) abort. */
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Implicit abort triggered by itnim cleanup. */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
			/* max retry completed free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1805
/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			 __bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1859
/*
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* Races with the abort; wait for the abort response. */
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Explicit abort is downgraded to an implicit cleanup. */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1921
/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* Races with the cleanup; wait for the abort response. */
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1982
/*
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Queue space available: issue the IO request now. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2026
/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Downgrade the pending explicit abort to a cleanup. */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO finished before the abort could be sent. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2077
/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO finished before the cleanup abort could be sent. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2127
/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* Callback delivered; the IO can now be freed. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2154
/*
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* Callback delivered first; park until fw frees the tag. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		/* Resource freed first; only the callback remains. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2187
/*
 * IO is completed (host callback already delivered), waiting for the
 * resource-free notification from firmware before the tag can be reused.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* Firmware released the IO resource; recycle the IO. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure releases everything implicitly; no action. */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2214
2215
2216
5fbe25c7 2217/*
a36c61f9
KG
2218 * hal_ioim_private
2219 */
2220
2221static void
2222__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2223{
2224 struct bfa_ioim_s *ioim = cbarg;
2225
2226 if (!complete) {
2227 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2228 return;
2229 }
2230
2231 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2232}
2233
/*
 * Deferred callback for a completed IO: unpack the firmware response
 * (SCSI status, sense data, residue) and hand it to the driver layer.
 */
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		/* Callback cancelled; drive the state machine instead. */
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
					m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/*
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			/* Overrun is reported as a negative residue. */
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}
2276
2277static void
2278__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2279{
2280 struct bfa_ioim_s *ioim = cbarg;
2281
2282 if (!complete) {
2283 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2284 return;
2285 }
2286
2287 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2288 0, 0, NULL, 0);
2289}
2290
2291static void
2292__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2293{
2294 struct bfa_ioim_s *ioim = cbarg;
2295
2296 bfa_stats(ioim->itnim, path_tov_expired);
2297 if (!complete) {
2298 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2299 return;
2300 }
2301
2302 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2303 0, 0, NULL, 0);
2304}
2305
2306static void
2307__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2308{
2309 struct bfa_ioim_s *ioim = cbarg;
2310
2311 if (!complete) {
2312 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2313 return;
2314 }
2315
2316 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2317}
2318
/*
 * SG page allocation wait callback: the requested SG pages are now
 * available. Splice them onto the IO, program them, and resume the
 * IO state machine.
 */
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	/* Pages must be on sgpg_q before bfa_ioim_sgpg_setup() walks them. */
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	bfa_ioim_sgpg_setup(ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
2329
/*
 * Send I/O request to firmware. Returns BFA_FALSE (and queues the IO on
 * the request-queue wait list) if there is no room in the request CQ.
 */
static	bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s *sge;
	u32 pgdlen = 0;
	u32 fcp_dl;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/*
	 * build inline IO SG element here; additional pages (if any) are
	 * described by the trailing BFI_SGE_PGDLEN element below.
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		/* remaining SGEs live in externally allocated SG pages */
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;	/* zero-fill the FCP_CMND first */
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio);
	fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through - RW uses the generic IO message class */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	/* Sequence-level recovery and unaligned sizes also need the
	 * generic (slow-path) IO message class. */
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/*
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
					FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
				m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
				cpu_to_be32(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2449
/*
 * Setup any additional SG pages needed. Inline SG element is setup
 * at queuing time. Returns BFA_FALSE if the allocation must wait
 * (completion resumes via bfa_ioim_sgpg_alloced).
 */
static bfa_boolean_t
bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
{
	u16	nsgpgs;

	bfa_assert(ioim->nsges > BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		/* Out of SG pages right now; queue and wait. */
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	bfa_ioim_sgpg_setup(ioim);

	return BFA_TRUE;
}
2479
/*
 * Fill the externally allocated SG pages with the SG elements that did
 * not fit inline in the IO request message. The first BFI_SGE_INLINE
 * elements of the scatterlist were consumed at queuing time; each page
 * holds up to BFI_SGPG_DATA_SGES data elements plus one link element
 * chaining to the next page.
 */
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int		sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32	pgcumsz;
	u64        addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	/* skip the scatterlist entry already programmed inline */
	sg = scsi_sglist(cmnd);
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/*
			 * set flags: CPL at each page boundary, LAST on the
			 * final data element overall
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sge);
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/*
		 * set the link element of each page: terminate with PGDLEN,
		 * otherwise chain to the next SG page
		 */
		if (sgeid == ioim->nsges) {
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;

		bfa_sge_to_le(sge);
	} while (sgeid < ioim->nsges);
}
2541
/*
 * Send I/O abort request to firmware. Returns BFA_FALSE if the request
 * queue is full (caller transitions to a qfull state and retries).
 */
static	bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i	msgop;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next; explicit aborts come from the
	 * driver, cleanups from itnim offline handling
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag    = cpu_to_be16(ioim->iotag);
	/* new abort_tag distinguishes this abort from stale responses */
	m->abort_tag = ++ioim->abort_tag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2576
5fbe25c7 2577/*
a36c61f9
KG
2578 * Call to resume any I/O requests waiting for room in request queue.
2579 */
2580static void
2581bfa_ioim_qresume(void *cbarg)
2582{
2583 struct bfa_ioim_s *ioim = cbarg;
2584
2585 bfa_stats(ioim->itnim, qresumes);
2586 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2587}
2588
2589
/*
 * Account a completed IO toward an in-progress cleanup: either an itnim
 * going offline or a task management command covering this IO.
 */
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			/* Hold the completion until the IO TOV fires or the
			 * device comes back (see bfa_ioim_delayed_comp). */
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_tskim_iodone(ioim->iosp->tskim);
}
2610
2611static bfa_boolean_t
2612bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2613{
2614 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2615 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
2616 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
2617 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
2618 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
2619 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
2620 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2621 return BFA_FALSE;
2622
2623 return BFA_TRUE;
2624}
2625
/*
 * Complete an IO that was held on the itnim delay queue - called either
 * when the IO TOV timer expires or after the link comes back.
 * NOTE(review): the original header comment was truncated in this view;
 * wording reconstructed from the function body - confirm against upstream.
 */
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/*
	 * If path tov timer expired, failback with PATHTOV status - these
	 * IO requests are not normally retried by IO stack.
	 *
	 * Otherwise device cameback online and fail it with normal failed
	 * status so that IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else {
		ioim->io_cbfn = __bfa_cb_ioim_failed;
		bfa_stats(ioim->itnim, iocom_nexus_abort);
	}
	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/*
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}
2654
2655
2656
5fbe25c7 2657/*
a36c61f9
KG
2658 * hal_ioim_friend
2659 */
2660
/*
 * Memory allocation and initialization: claim KVA for the IOIM and IOIM-sp
 * arrays and DMA memory for per-IO sense buffers, then initialize every
 * IOIM and place it on the free queue.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s		*ioim;
	struct bfa_ioim_sp_s	*iosp;
	u16		i;
	u8			*snsinfo;
	u32		snsbufsz;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/*
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa  = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	/* tell firmware where the sense buffer region lives */
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag   = i;
		ioim->bfa     = fcpim->bfa;
		ioim->fcpim   = fcpim;
		ioim->iosp    = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				   bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				   bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
2724
/*
 * Driver detach time call. Intentionally empty: all IOIM memory was
 * carved from module memory claimed in bfa_ioim_attach() and is released
 * with it; there is nothing to tear down per-IO.
 */
void
bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
{
}
2732
/*
 * ISR for IO completion messages from firmware: map the firmware status
 * to a state-machine event and dispatch it to the addressed IOIM.
 */
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	/* stash the response for the deferred completion callback */
	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		ioim->iosp->comp_rspmsg = *m;

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_stats(ioim->itnim, iocomp_ok);
		/* reuse_io_tag != 0 means firmware still holds the tag */
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
		bfa_stats(ioim->itnim, iocomp_timedout);
		/* fall through - timeouts are reported upward as aborts */
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_stats(ioim->itnim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_stats(ioim->itnim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		/* firmware asks the host to retry with sequence recovery */
		bfa_stats(ioim->itnim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_stats(ioim->itnim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_stats(ioim->itnim, iocom_hostabrts);
		if (rsp->abort_tag != ioim->abort_tag) {
			/* stale abort response - ignore it */
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_stats(ioim->itnim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}
2816
/*
 * Fast-path ISR for IOs that completed with good status: no response
 * unpacking is needed, only profiling and the COMP_GOOD event.
 */
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_ioim_cb_profile_comp(fcpim, ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
2835
/*
 * Record the IO start timestamp (in jiffies) for latency profiling.
 */
void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
	ioim->start_time = jiffies;
}
2841
/*
 * Update per-itnim IO latency statistics (count/min/max/avg, bucketed by
 * IO size index) when a profiled IO completes.
 */
void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
	u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
	u32 index = bfa_ioim_get_index(fcp_dl);
	u64 end_time = jiffies;
	struct bfa_itnim_latency_s *io_lat =
			&(ioim->itnim->ioprofile.io_latency);
	u32 val = (u32)(end_time - ioim->start_time);

	bfa_itnim_ioprofile_update(ioim->itnim, index);

	io_lat->count[index]++;
	/* NOTE(review): if min[] starts zeroed it can never rise above 0;
	 * verify min[] is seeded to a large value when profiling starts. */
	io_lat->min[index] = (io_lat->min[index] < val) ?
		io_lat->min[index] : val;
	io_lat->max[index] = (io_lat->max[index] > val) ?
		io_lat->max[index] : val;
	/* avg[] accumulates a sum; presumably divided by count[] on read */
	io_lat->avg[index] += val;
}
5fbe25c7 2861/*
a36c61f9
KG
2862 * Called by itnim to clean up IO while going offline.
2863 */
2864void
2865bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2866{
2867 bfa_trc(ioim->bfa, ioim->iotag);
2868 bfa_stats(ioim->itnim, io_cleanups);
2869
2870 ioim->iosp->tskim = NULL;
2871 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2872}
2873
2874void
2875bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2876{
2877 bfa_trc(ioim->bfa, ioim->iotag);
2878 bfa_stats(ioim->itnim, io_tmaborts);
2879
2880 ioim->iosp->tskim = tskim;
2881 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2882}
2883
5fbe25c7 2884/*
a36c61f9
KG
2885 * IOC failure handling.
2886 */
2887void
2888bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2889{
2890 bfa_trc(ioim->bfa, ioim->iotag);
2891 bfa_stats(ioim->itnim, io_iocdowns);
2892 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2893}
2894
5fbe25c7 2895/*
a36c61f9
KG
2896 * IO offline TOV popped. Fail the pending IO.
2897 */
2898void
2899bfa_ioim_tov(struct bfa_ioim_s *ioim)
2900{
2901 bfa_trc(ioim->bfa, ioim->iotag);
2902 bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2903}
2904
2905
2906
5fbe25c7 2907/*
a36c61f9
KG
2908 * hal_ioim_api
2909 */
2910
/*
 * Allocate IOIM resource for initiator mode I/O request.
 * Returns NULL when all IO tags are in use (caller must requeue).
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/*
	 * allocate IOIM resource from the free queue
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}
2943
/*
 * Return an IOIM to the free queue, releasing any SG pages it held.
 * Must only be called from the uninit state.
 */
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
			  (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
2964
/*
 * Start a freshly allocated IO: pick the request queue and kick the
 * state machine.
 */
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued:
	 * driver-chosen queue when IO redirection is enabled, otherwise
	 * the itnim's default queue.
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			bfa_cb_ioim_get_reqq(ioim->dio) :
			bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
2981
/*
 * Driver I/O abort request. Fails with BFA_STATUS_FAILED if the IO is in
 * a state where an abort is meaningless (see bfa_ioim_is_abortable).
 */
bfa_status_t
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{

	bfa_trc(ioim->bfa, ioim->iotag);

	if (!bfa_ioim_is_abortable(ioim))
		return BFA_STATUS_FAILED;

	bfa_stats(ioim->itnim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);

	return BFA_STATUS_OK;
}
2999
3000
5fbe25c7 3001/*
a36c61f9
KG
3002 * BFA TSKIM state machine functions
3003 */
3004
/*
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		/* collect the IOs this TM covers before anything else */
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		if (!bfa_tskim_send(tskim)) {
			/* request queue full; wait for space */
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					  &tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3041
/*
 * TM command is active, awaiting completion from firmware to
 * cleanup IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/* TM done on wire; now cleanup the IOs it covered */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* itnim went offline; abort the in-flight TM itself */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3078
/*
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3110
/*
 * TM command is done on the wire; waiting for the IOs in its scope to
 * finish their cleanup before notifying TM completion.
 */
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3139
/*
 * Task management command is waiting for room in request CQ.
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		/* space available; send the TM now */
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3174
/*
 * Task management command is active, awaiting for room in request CQ
 * to send clean up request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 * TM completed while waiting; still send the abort to
		 * release firmware-side state.
		 * Fall through !!!
		 */

	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3209
/*
 * BFA callback is pending: only the deferred host completion remains.
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		/* callback delivered; TM can be reused */
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* nothing outstanding on the wire; ignore */
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3235
3236
3237
5fbe25c7 3238/*
a36c61f9
KG
3239 * hal_tskim_private
3240 */
3241
3242static void
3243__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3244{
3245 struct bfa_tskim_s *tskim = cbarg;
3246
3247 if (!complete) {
3248 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3249 return;
3250 }
3251
3252 bfa_stats(tskim->itnim, tm_success);
3253 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3254}
3255
3256static void
3257__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3258{
3259 struct bfa_tskim_s *tskim = cbarg;
3260
3261 if (!complete) {
3262 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3263 return;
3264 }
3265
3266 bfa_stats(tskim->itnim, tm_failures);
3267 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3268 BFI_TSKIM_STS_FAILED);
3269}
3270
/*
 * Check whether an IO on @lun falls within the scope of this TM command:
 * target reset covers every LUN, the other TM types only their own LUN.
 */
static bfa_boolean_t
bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
{
	switch (tskim->tm_cmnd) {
	case FCP_TM_TARGET_RESET:
		return BFA_TRUE;

	case FCP_TM_ABORT_TASK_SET:
	case FCP_TM_CLEAR_TASK_SET:
	case FCP_TM_LUN_RESET:
	case FCP_TM_CLEAR_ACA:
		return (tskim->lun == lun);

	default:
		bfa_assert(0);
	}

	return BFA_FALSE;
}
3290
/*
 * Gather affected IO requests and task management commands: move active
 * IOs in TM scope onto the TM's own queue, and fail pending (not yet
 * started) IOs immediately.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head	*qe, *qen;

	INIT_LIST_HEAD(&tskim->io_q);

	/*
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		if (bfa_tskim_match_scope
			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/*
	 * Failback any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		if (bfa_tskim_match_scope
			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}
3328
5fbe25c7 3329/*
a36c61f9
KG
3330 * IO cleanup completion
3331 */
3332static void
3333bfa_tskim_cleanp_comp(void *tskim_cbarg)
3334{
3335 struct bfa_tskim_s *tskim = tskim_cbarg;
3336
3337 bfa_stats(tskim->itnim, tm_io_comps);
3338 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3339}
3340
/*
 * Clean up every IO gathered under this TM, tracking completions with a
 * wait counter; bfa_tskim_cleanp_comp() fires when the last IO is done.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head	*qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		/* count each IO before launching its cleanup */
		bfa_wc_up(&tskim->wc);
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	/* drop the initial reference; may complete immediately if no IOs */
	bfa_wc_wait(&tskim->wc);
}
3360
/*
 * Send task management request to firmware. Returns BFA_FALSE when the
 * request queue is full.
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
			bfa_lpuid(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
3395
/*
 * Send abort request to cleanup an active TM to firmware.
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/*
	 * check for room in queue to send request now; BFA_FALSE tells the
	 * caller to wait for a queue-resume callback
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build abort request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
			bfa_lpuid(tskim->bfa));

	/* only the (big-endian) task tag identifies the TM to abort */
	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
3426
/*
 * Call to resume task management cmnd waiting for room in request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	/*
	 * Registered via bfa_reqq_winit() in bfa_tskim_attach(); invoked
	 * when request-queue space becomes available again.
	 */
	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}
3438
/*
 * Cleanup IOs associated with a task management command on IOC failures.
 */
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	/* Fail every IO gathered under this TM command. */
	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
3453
3454
3455
/*
 * hal_tskim_friend
 */
3459
/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	/*
	 * One fewer outstanding IO; the last one lets the wait counter run
	 * bfa_tskim_cleanp_comp() (see bfa_tskim_cleanup_ios()).
	 */
	bfa_wc_down(&tskim->wc);
}
3468
/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	/*
	 * NOTE(review): notify is cleared on the h/w failure path, in
	 * contrast to bfa_tskim_cleanup() which sets it — presumably no
	 * completion notification is wanted here; confirm against the SM.
	 */
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}
3479
/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	/* Unlike the IOC-failure path, completion is to be notified. */
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
3490
/*
 * Memory allocation and initialization.
 */
3494void
3495bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3496{
3497 struct bfa_tskim_s *tskim;
3498 u16 i;
3499
3500 INIT_LIST_HEAD(&fcpim->tskim_free_q);
3501
3502 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
3503 fcpim->tskim_arr = tskim;
3504
3505 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3506 /*
3507 * initialize TSKIM
3508 */
6a18b167 3509 memset(tskim, 0, sizeof(struct bfa_tskim_s));
a36c61f9
KG
3510 tskim->tsk_tag = i;
3511 tskim->bfa = fcpim->bfa;
3512 tskim->fcpim = fcpim;
3513 tskim->notify = BFA_FALSE;
3514 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3515 tskim);
3516 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3517
3518 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3519 }
3520
3521 bfa_meminfo_kva(minfo) = (u8 *) tskim;
3522}
3523
void
bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
{
	/*
	 * @todo — nothing to release yet; TSKIM memory is carved from the
	 * module meminfo block in bfa_tskim_attach(), not allocated here.
	 */
}
3531
/*
 * Handle task management response messages from firmware.
 */
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);	/* wire order -> host order */

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	bfa_assert(tskim->tsk_tag == tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}
3557
3558
3559
/*
 * hal_tskim_api
 */
3563
3564
3565struct bfa_tskim_s *
3566bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3567{
3568 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3569 struct bfa_tskim_s *tskim;
3570
3571 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3572
3573 if (tskim)
3574 tskim->dtsk = dtsk;
3575
3576 return tskim;
3577}
3578
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	/* A freed TSKIM must still be linked on its itnim's TM queue. */
	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
3586
/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	t_secs	Timeout in seconds
 *
 * @return None.
 */
3598void
3599bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
3600 enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3601{
3602 tskim->itnim = itnim;
3603 tskim->lun = lun;
3604 tskim->tm_cmnd = tm_cmnd;
3605 tskim->tsecs = tsecs;
3606 tskim->notify = BFA_FALSE;
3607 bfa_stats(itnim, tm_cmnds);
3608
3609 list_add_tail(&tskim->qe, &itnim->tsk_q);
3610 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3611}