/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
		bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);

	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}

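/*
 * Note on the full test above (illustrative): with entry_count = 4 and
 * hba_index = 0, host_index can advance to at most 3 before
 * ((host_index + 1) % entry_count) == hba_index becomes true, so one
 * slot is always left unused to distinguish a full queue from an empty
 * one.
 */
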
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

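/*
 * Typical caller pattern (sketch, not a verbatim caller from this file):
 * drain the EQ with lpfc_sli4_eq_get(), handle each EQE, then release
 * the whole batch and rearm with a single doorbell write:
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)))
 *		...process eqe...
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */
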
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* If the next CQE is not valid then we are done */
	if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe to the next available entry on
 * @hq and the contents of @drqe to the next available entry on @dq. This
 * function will then ring the Receive Queue Doorbell to signal the HBA to
 * start processing the Receive Queue Entries. This function returns the index
 * that the hrqe was copied to if successful. If no entries are available on
 * @hq then this function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
	struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       LPFC_RQ_POST_BATCH);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}

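/*
 * Note (illustrative): the header-queue doorbell above is rung only once
 * per LPFC_RQ_POST_BATCH buffers posted, amortizing the MMIO write over
 * the batch instead of paying one doorbell per receive buffer pair.
 */
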
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of both queues to reflect
 * consumption of one Receive Queue Entry pair by the HBA. When the HBA
 * indicates that it has consumed an entry the host calls this function to
 * update the queues' internal pointers. This routine returns the number of
 * entries that were consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	return sglq;
}

/**
 * __lpfc_sli_get_sglq - Allocates an sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. It gets a new driver
 * sglq object from the sglq list. If the list is not empty it
 * returns a pointer to the newly allocated sglq object, else it
 * returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	uint16_t adj_xri;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release the driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
	if (sglq) {
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
		    && ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
			&& (iocbq->iocb.un.ulpWord[4]
			    == IOERR_ABORT_REQUESTED))) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else
			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release the driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release the driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
}

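/*
 * The indirection above is the driver's SLI-revision dispatch: at init
 * time phba->__lpfc_sli_release_iocbq is pointed at either the _s3 or
 * the _s4 variant, so common code can free an iocb without checking
 * sli_rev on every call. (The function pointer is assumed to be set up
 * during HBA initialization, outside this section.)
 */
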
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCBs
 * on the list by invoking the complete callback function associated with each
 * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by the ring event handler function to get the iocb
 * type. This function translates the iocb command to an iocb command type
 * used to decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB if it is a solicited iocb completion
 * LPFC_ABORT_IOCB if it is an abort iocb
 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case DSSCMD_INVALIDATE_DEK:
	case DSSCMD_SET_KEK:
	case DSSCMD_GET_KEK_ID:
	case DSSCMD_GEN_XFER:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	pring->txcmplq_cnt++;
	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}

	return 0;
}

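/*
 * Note (illustrative): jiffies + HZ * (fc_ratov << 1) pushes the vport
 * ELS timeout two RATOV intervals into the future, i.e. each tracked
 * ELS command re-arms the timer for 2 * fc_ratov seconds.
 */
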
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get the next
 * iocb in the txq of the given ring. If there is any iocb in
 * the txq, the function returns the first iocb in the list after
 * removing it from the list, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * the iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

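/*
 * Note (illustrative): the lookup table above grows by
 * LPFC_IOCBQ_LOOKUP_INCREMENT slots at a time. The allocation is done
 * with the hbalock dropped, so after reacquiring the lock the code
 * rechecks iocbq_lookup_len in case another thread grew the table
 * first; that race is the "highly improbable case" branch.
 */
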
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to the ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

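/*
 * Note (illustrative): the wmb() above orders the iocb copy into the
 * ring against the later cmdPutInx write, so the HBA cannot observe the
 * updated put index before the entry contents are visible in memory.
 */
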
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    lpfc_is_link_up(phba) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}

/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is a free slot
 * available for the HBQ it will return a pointer to the next
 * available HBQ entry, else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}

/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffers that are in-flight */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
								    hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * zero if it successfully posts the buffer, else it will return an
 * error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero
 * if it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			      &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = rc;
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}

/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	.init_count = 0,
	.add_count = 5,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};

/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);

	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}

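/*
 * Note (illustrative): the buffer tag assigned above packs the HBQ
 * number into the upper 16 bits and a per-HBQ sequence value into the
 * lower 16 bits, which is why consumers recover the queue number with
 * "tag >> 16" (see lpfc_sli_hbqbuf_find() below).
 */
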
/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ
 * entries successfully posted.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->add_count);
}

/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from the SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully posted.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->init_count);
}

/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
 * @rb_list: Pointer to the hbq buffer list to take a buffer from.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}

e59058c4 1531/**
3621a710 1532 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
e59058c4
JS
1533 * @phba: Pointer to HBA context object.
1534 * @tag: Tag of the hbq buffer.
1535 *
1536 * This function is called with hbalock held. This function searches
1537 * for the hbq buffer associated with the given tag in the hbq buffer
1538 * list. If it finds the hbq buffer, it returns the hbq_buffer, otherwise
1539 * it returns NULL.
1540 **/
a6ababd2 1541static struct hbq_dmabuf *
92d7f7b0 1542lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
ed957684 1543{
92d7f7b0
JS
1544 struct lpfc_dmabuf *d_buf;
1545 struct hbq_dmabuf *hbq_buf;
51ef4c26
JS
1546 uint32_t hbqno;
1547
1548 hbqno = tag >> 16;
a0a74e45 1549 if (hbqno >= LPFC_MAX_HBQS)
51ef4c26 1550 return NULL;
ed957684 1551
3772a991 1552 spin_lock_irq(&phba->hbalock);
51ef4c26 1553 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
92d7f7b0 1554 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
51ef4c26 1555 if (hbq_buf->tag == tag) {
3772a991 1556 spin_unlock_irq(&phba->hbalock);
92d7f7b0 1557 return hbq_buf;
ed957684
JS
1558 }
1559 }
3772a991 1560 spin_unlock_irq(&phba->hbalock);
92d7f7b0 1561 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
e8b62011 1562 "1803 Bad hbq tag. Data: x%x x%x\n",
a8adb832 1563 tag, phba->hbqs[tag >> 16].buffer_count);
92d7f7b0 1564 return NULL;
ed957684
JS
1565}
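
/*
 * Hedged sketch of the tag layout lpfc_sli_hbqbuf_find() decodes: the HBQ
 * number sits in bits 31:16 and the per-queue buffer sequence in bits 15:0,
 * matching the assignment made in lpfc_sli_hbqbuf_fill_hbqs() above. The
 * helper names are invented for illustration.
 */
#if 0
static inline uint32_t lpfc_example_make_hbq_tag(uint32_t hbqno, uint32_t seq)
{
	return (hbqno << 16) | (seq & 0xffff);
}

static inline uint32_t lpfc_example_hbqno_from_tag(uint32_t tag)
{
	return tag >> 16;	/* the same shift the lookup above uses */
}
#endif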
1566
e59058c4 1567/**
3621a710 1568 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
e59058c4
JS
1569 * @phba: Pointer to HBA context object.
1570 * @hbq_buffer: Pointer to HBQ buffer.
1571 *
1572 * This function is called with the hbalock held. This function gives back
1573 * the hbq buffer to firmware. If the HBQ does not have space to
1574 * post the buffer, it will free the buffer.
1575 **/
ed957684 1576void
51ef4c26 1577lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
ed957684
JS
1578{
1579 uint32_t hbqno;
1580
51ef4c26
JS
1581 if (hbq_buffer) {
1582 hbqno = hbq_buffer->tag >> 16;
3772a991 1583 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
51ef4c26 1584 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
ed957684
JS
1585 }
1586}
1587
e59058c4 1588/**
3621a710 1589 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
e59058c4
JS
1590 * @mbxCommand: mailbox command code.
1591 *
1592 * This function is called by the mailbox event handler function to verify
1593 * that the completed mailbox command is a legitimate mailbox command. If the
1594 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
1595 * and the mailbox event handler will take the HBA offline.
1596 **/
dea3101e
JB
1597static int
1598lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1599{
1600 uint8_t ret;
1601
1602 switch (mbxCommand) {
1603 case MBX_LOAD_SM:
1604 case MBX_READ_NV:
1605 case MBX_WRITE_NV:
a8adb832 1606 case MBX_WRITE_VPARMS:
dea3101e
JB
1607 case MBX_RUN_BIU_DIAG:
1608 case MBX_INIT_LINK:
1609 case MBX_DOWN_LINK:
1610 case MBX_CONFIG_LINK:
1611 case MBX_CONFIG_RING:
1612 case MBX_RESET_RING:
1613 case MBX_READ_CONFIG:
1614 case MBX_READ_RCONFIG:
1615 case MBX_READ_SPARM:
1616 case MBX_READ_STATUS:
1617 case MBX_READ_RPI:
1618 case MBX_READ_XRI:
1619 case MBX_READ_REV:
1620 case MBX_READ_LNK_STAT:
1621 case MBX_REG_LOGIN:
1622 case MBX_UNREG_LOGIN:
1623 case MBX_READ_LA:
1624 case MBX_CLEAR_LA:
1625 case MBX_DUMP_MEMORY:
1626 case MBX_DUMP_CONTEXT:
1627 case MBX_RUN_DIAGS:
1628 case MBX_RESTART:
1629 case MBX_UPDATE_CFG:
1630 case MBX_DOWN_LOAD:
1631 case MBX_DEL_LD_ENTRY:
1632 case MBX_RUN_PROGRAM:
1633 case MBX_SET_MASK:
09372820 1634 case MBX_SET_VARIABLE:
dea3101e 1635 case MBX_UNREG_D_ID:
41415862 1636 case MBX_KILL_BOARD:
dea3101e 1637 case MBX_CONFIG_FARP:
41415862 1638 case MBX_BEACON:
dea3101e
JB
1639 case MBX_LOAD_AREA:
1640 case MBX_RUN_BIU_DIAG64:
1641 case MBX_CONFIG_PORT:
1642 case MBX_READ_SPARM64:
1643 case MBX_READ_RPI64:
1644 case MBX_REG_LOGIN64:
1645 case MBX_READ_LA64:
09372820 1646 case MBX_WRITE_WWN:
dea3101e
JB
1647 case MBX_SET_DEBUG:
1648 case MBX_LOAD_EXP_ROM:
57127f15 1649 case MBX_ASYNCEVT_ENABLE:
92d7f7b0
JS
1650 case MBX_REG_VPI:
1651 case MBX_UNREG_VPI:
858c9f6c 1652 case MBX_HEARTBEAT:
84774a4d
JS
1653 case MBX_PORT_CAPABILITIES:
1654 case MBX_PORT_IOV_CONTROL:
04c68496
JS
1655 case MBX_SLI4_CONFIG:
1656 case MBX_SLI4_REQ_FTRS:
1657 case MBX_REG_FCFI:
1658 case MBX_UNREG_FCFI:
1659 case MBX_REG_VFI:
1660 case MBX_UNREG_VFI:
1661 case MBX_INIT_VPI:
1662 case MBX_INIT_VFI:
1663 case MBX_RESUME_RPI:
dea3101e
JB
1664 ret = mbxCommand;
1665 break;
1666 default:
1667 ret = MBX_SHUTDOWN;
1668 break;
1669 }
2e0fef85 1670 return ret;
dea3101e 1671}
e59058c4
JS
1672
1673/**
3621a710 1674 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
e59058c4
JS
1675 * @phba: Pointer to HBA context object.
1676 * @pmboxq: Pointer to mailbox command.
1677 *
1678 * This is the completion handler function for mailbox commands issued
1679 * from the lpfc_sli_issue_mbox_wait function. This function is called by
1680 * the mailbox event handler function with no lock held. This function
1681 * will wake up the thread waiting on the wait queue pointed to by the
1682 * context1 field of the mailbox.
1683 **/
04c68496 1684void
2e0fef85 1685lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea3101e
JB
1686{
1687 wait_queue_head_t *pdone_q;
858c9f6c 1688 unsigned long drvr_flag;
dea3101e
JB
1689
1690 /*
1691 * If pdone_q is empty, the driver thread gave up waiting and
1692 * continued running.
1693 */
7054a606 1694 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
858c9f6c 1695 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea3101e
JB
1696 pdone_q = (wait_queue_head_t *) pmboxq->context1;
1697 if (pdone_q)
1698 wake_up_interruptible(pdone_q);
858c9f6c 1699 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e
JB
1700 return;
1701}
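
/*
 * Minimal sketch of the waiting side this completion handler pairs with;
 * the real lpfc_sli_issue_mbox_wait() adds timeout bookkeeping and careful
 * context1 cleanup under hbalock. Names marked 'example' are invented.
 */
#if 0
static int lpfc_example_issue_mbox_and_wait(struct lpfc_hba *phba,
					    LPFC_MBOXQ_t *pmboxq,
					    uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int rc;

	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	pmboxq->context1 = &done_q;	/* handler wakes this queue */

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (rc == MBX_BUSY || rc == MBX_SUCCESS)
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				timeout * HZ);
	return rc;
}
#endif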
1702
e59058c4
JS
1703
1704/**
3621a710 1705 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
e59058c4
JS
1706 * @phba: Pointer to HBA context object.
1707 * @pmb: Pointer to mailbox object.
1708 *
1709 * This function is the default mailbox completion handler. It
1710 * frees the memory resources associated with the completed mailbox
1711 * command. If the completed command is a REG_LOGIN mailbox command,
1712 * this function will issue an UNREG_LOGIN to reclaim the RPI.
1713 **/
dea3101e 1714void
2e0fef85 1715lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e
JB
1716{
1717 struct lpfc_dmabuf *mp;
04c68496 1718 uint16_t rpi, vpi;
7054a606
JS
1719 int rc;
1720
dea3101e 1721 mp = (struct lpfc_dmabuf *) (pmb->context1);
7054a606 1722
dea3101e
JB
1723 if (mp) {
1724 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1725 kfree(mp);
1726 }
7054a606 1727
04c68496
JS
1728 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1729 (phba->sli_rev == LPFC_SLI_REV4))
1730 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1731
7054a606
JS
1732 /*
1733 * If a REG_LOGIN succeeded after node is destroyed or node
1734 * is in re-discovery driver need to cleanup the RPI.
1735 */
2e0fef85 1736 if (!(phba->pport->load_flag & FC_UNLOADING) &&
04c68496
JS
1737 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1738 !pmb->u.mb.mbxStatus) {
1739 rpi = pmb->u.mb.un.varWords[0];
1740 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1741 lpfc_unreg_login(phba, vpi, rpi, pmb);
92d7f7b0 1742 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7054a606
JS
1743 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1744 if (rc != MBX_NOT_FINISHED)
1745 return;
1746 }
1747
04c68496
JS
1748 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1749 lpfc_sli4_mbox_cmd_free(phba, pmb);
1750 else
1751 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e
JB
1752}
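
/*
 * Typical fire-and-forget mailbox usage that relies on the default
 * completion handler above to free resources; a sketch only, with error
 * handling trimmed. lpfc_heart_beat() builds the HEARTBEAT command.
 */
#if 0
static void lpfc_example_send_heartbeat(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return;
	lpfc_heart_beat(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED)
		mempool_free(pmb, phba->mbox_mem_pool);
}
#endif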
1753
e59058c4 1754/**
3621a710 1755 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
e59058c4
JS
1756 * @phba: Pointer to HBA context object.
1757 *
1758 * This function is called with no lock held. This function processes all
1759 * the completed mailbox commands and gives them to the upper layers. The interrupt
1760 * service routine processes mailbox completion interrupt and adds completed
1761 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
1762 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
1763 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
1764 * function returns the mailbox commands to the upper layer by calling the
1765 * completion handler function of each mailbox.
1766 **/
dea3101e 1767int
2e0fef85 1768lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea3101e 1769{
92d7f7b0 1770 MAILBOX_t *pmbox;
dea3101e 1771 LPFC_MBOXQ_t *pmb;
92d7f7b0
JS
1772 int rc;
1773 LIST_HEAD(cmplq);
dea3101e
JB
1774
1775 phba->sli.slistat.mbox_event++;
1776
92d7f7b0
JS
1777 /* Get all completed mailbox buffers into the cmplq */
1778 spin_lock_irq(&phba->hbalock);
1779 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
1780 spin_unlock_irq(&phba->hbalock);
dea3101e 1781
92d7f7b0
JS
1782 /* Get a Mailbox buffer to setup mailbox commands for callback */
1783 do {
1784 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
1785 if (pmb == NULL)
1786 break;
2e0fef85 1787
04c68496 1788 pmbox = &pmb->u.mb;
dea3101e 1789
858c9f6c
JS
1790 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
1791 if (pmb->vport) {
1792 lpfc_debugfs_disc_trc(pmb->vport,
1793 LPFC_DISC_TRC_MBOX_VPORT,
1794 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
1795 (uint32_t)pmbox->mbxCommand,
1796 pmbox->un.varWords[0],
1797 pmbox->un.varWords[1]);
1798 }
1799 else {
1800 lpfc_debugfs_disc_trc(phba->pport,
1801 LPFC_DISC_TRC_MBOX,
1802 "MBOX cmpl: cmd:x%x mb:x%x x%x",
1803 (uint32_t)pmbox->mbxCommand,
1804 pmbox->un.varWords[0],
1805 pmbox->un.varWords[1]);
1806 }
1807 }
1808
dea3101e
JB
1809 /*
1810 * It is a fatal error if an unknown mbox command completes.
1811 */
1812 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
1813 MBX_SHUTDOWN) {
af901ca1 1814 /* Unknown mailbox command compl */
92d7f7b0 1815 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
e8b62011 1816 "(%d):0323 Unknown Mailbox command "
04c68496 1817 "x%x (x%x) Cmpl\n",
92d7f7b0 1818 pmb->vport ? pmb->vport->vpi : 0,
04c68496
JS
1819 pmbox->mbxCommand,
1820 lpfc_sli4_mbox_opcode_get(phba, pmb));
2e0fef85 1821 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
1822 phba->work_hs = HS_FFER3;
1823 lpfc_handle_eratt(phba);
92d7f7b0 1824 continue;
dea3101e
JB
1825 }
1826
dea3101e
JB
1827 if (pmbox->mbxStatus) {
1828 phba->sli.slistat.mbox_stat_err++;
1829 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
1830 /* Mbox cmd cmpl error - RETRYing */
92d7f7b0
JS
1831 lpfc_printf_log(phba, KERN_INFO,
1832 LOG_MBOX | LOG_SLI,
e8b62011 1833 "(%d):0305 Mbox cmd cmpl "
92d7f7b0 1834 "error - RETRYing Data: x%x "
04c68496 1835 "(x%x) x%x x%x x%x\n",
92d7f7b0
JS
1836 pmb->vport ? pmb->vport->vpi :0,
1837 pmbox->mbxCommand,
04c68496
JS
1838 lpfc_sli4_mbox_opcode_get(phba,
1839 pmb),
92d7f7b0
JS
1840 pmbox->mbxStatus,
1841 pmbox->un.varWords[0],
1842 pmb->vport->port_state);
dea3101e
JB
1843 pmbox->mbxStatus = 0;
1844 pmbox->mbxOwner = OWN_HOST;
dea3101e 1845 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
04c68496 1846 if (rc != MBX_NOT_FINISHED)
92d7f7b0 1847 continue;
dea3101e
JB
1848 }
1849 }
1850
1851 /* Mailbox cmd <cmd> Cmpl <cmpl> */
92d7f7b0 1852 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
04c68496 1853 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
dea3101e 1854 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
92d7f7b0 1855 pmb->vport ? pmb->vport->vpi : 0,
dea3101e 1856 pmbox->mbxCommand,
04c68496 1857 lpfc_sli4_mbox_opcode_get(phba, pmb),
dea3101e
JB
1858 pmb->mbox_cmpl,
1859 *((uint32_t *) pmbox),
1860 pmbox->un.varWords[0],
1861 pmbox->un.varWords[1],
1862 pmbox->un.varWords[2],
1863 pmbox->un.varWords[3],
1864 pmbox->un.varWords[4],
1865 pmbox->un.varWords[5],
1866 pmbox->un.varWords[6],
1867 pmbox->un.varWords[7]);
1868
92d7f7b0 1869 if (pmb->mbox_cmpl)
dea3101e 1870 pmb->mbox_cmpl(phba,pmb);
92d7f7b0
JS
1871 } while (1);
1872 return 0;
1873}
dea3101e 1874
e59058c4 1875/**
3621a710 1876 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
e59058c4
JS
1877 * @phba: Pointer to HBA context object.
1878 * @pring: Pointer to driver SLI ring object.
1879 * @tag: buffer tag.
1880 *
1881 * This function is called with no lock held. When the QUE_BUFTAG_BIT
1882 * is set in the tag, the buffer was posted for a particular exchange and
1883 * the function returns the buffer without replacing it.
1884 * If the buffer is for unsolicited ELS or CT traffic, this function
1885 * returns the buffer and also posts another buffer to the firmware.
1886 **/
76bb24ef
JS
1887static struct lpfc_dmabuf *
1888lpfc_sli_get_buff(struct lpfc_hba *phba,
9f1e1b50
JS
1889 struct lpfc_sli_ring *pring,
1890 uint32_t tag)
76bb24ef 1891{
9f1e1b50
JS
1892 struct hbq_dmabuf *hbq_entry;
1893
76bb24ef
JS
1894 if (tag & QUE_BUFTAG_BIT)
1895 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
9f1e1b50
JS
1896 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
1897 if (!hbq_entry)
1898 return NULL;
1899 return &hbq_entry->dbuf;
76bb24ef 1900}
57127f15 1901
3772a991
JS
1902/**
1903 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1904 * @phba: Pointer to HBA context object.
1905 * @pring: Pointer to driver SLI ring object.
1906 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1907 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1908 * @fch_type: the type for the first frame of the sequence.
1909 *
1910 * This function is called with no lock held. This function uses the r_ctl and
1911 * type of the received sequence to find the correct callback function to call
1912 * to process the sequence.
1913 **/
1914static int
1915lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1916 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1917 uint32_t fch_type)
1918{
1919 int i;
1920
1921 /* Unsolicited responses */
1922 if (pring->prt[0].profile) {
1923 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1924 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1925 saveq);
1926 return 1;
1927 }
1928 /* We must search, based on rctl / type, for the right routine */
1930 for (i = 0; i < pring->num_mask; i++) {
1931 if ((pring->prt[i].rctl == fch_r_ctl) &&
1932 (pring->prt[i].type == fch_type)) {
1933 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1934 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1935 (phba, pring, saveq);
1936 return 1;
1937 }
1938 }
1939 return 0;
1940}
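
/*
 * Hedged sketch of how a (rctl, type) handler slot in pring->prt[] might be
 * populated so the dispatch above can find it; in the real driver this
 * wiring happens during ring setup (lpfc_sli_setup()). The function name is
 * invented; lpfc_els_unsol_event() is the driver's ELS receive handler.
 */
#if 0
static void lpfc_example_register_els_mask(struct lpfc_sli_ring *pring)
{
	pring->prt[0].profile = 0;	/* 0 = match on rctl/type below */
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
	pring->num_mask = 1;
}
#endif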
e59058c4
JS
1941
1942/**
3621a710 1943 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
e59058c4
JS
1944 * @phba: Pointer to HBA context object.
1945 * @pring: Pointer to driver SLI ring object.
1946 * @saveq: Pointer to the unsolicited iocb.
1947 *
1948 * This function is called with no lock held by the ring event handler
1949 * when there is an unsolicited iocb posted to the response ring by the
1950 * firmware. This function gets the buffer associated with the iocbs
1951 * and calls the event handler for the ring. This function handles both
1952 * qring buffers and hbq buffers.
1953 * When the function returns 1, the caller can free the iocb object; otherwise,
1954 * upper layer functions will free the iocb objects.
1955 **/
dea3101e
JB
1956static int
1957lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1958 struct lpfc_iocbq *saveq)
1959{
1960 IOCB_t * irsp;
1961 WORD5 * w5p;
1962 uint32_t Rctl, Type;
3772a991 1963 uint32_t match;
76bb24ef 1964 struct lpfc_iocbq *iocbq;
3163f725 1965 struct lpfc_dmabuf *dmzbuf;
dea3101e
JB
1966
1967 match = 0;
1968 irsp = &(saveq->iocb);
57127f15
JS
1969
1970 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
1971 if (pring->lpfc_sli_rcv_async_status)
1972 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
1973 else
1974 lpfc_printf_log(phba,
1975 KERN_WARNING,
1976 LOG_SLI,
1977 "0316 Ring %d handler: unexpected "
1978 "ASYNC_STATUS iocb received evt_code "
1979 "0x%x\n",
1980 pring->ringno,
1981 irsp->un.asyncstat.evt_code);
1982 return 1;
1983 }
1984
3163f725
JS
1985 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
1986 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
1987 if (irsp->ulpBdeCount > 0) {
1988 dmzbuf = lpfc_sli_get_buff(phba, pring,
1989 irsp->un.ulpWord[3]);
1990 lpfc_in_buf_free(phba, dmzbuf);
1991 }
1992
1993 if (irsp->ulpBdeCount > 1) {
1994 dmzbuf = lpfc_sli_get_buff(phba, pring,
1995 irsp->unsli3.sli3Words[3]);
1996 lpfc_in_buf_free(phba, dmzbuf);
1997 }
1998
1999 if (irsp->ulpBdeCount > 2) {
2000 dmzbuf = lpfc_sli_get_buff(phba, pring,
2001 irsp->unsli3.sli3Words[7]);
2002 lpfc_in_buf_free(phba, dmzbuf);
2003 }
2004
2005 return 1;
2006 }
2007
92d7f7b0 2008 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
76bb24ef
JS
2009 if (irsp->ulpBdeCount != 0) {
2010 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2011 irsp->un.ulpWord[3]);
2012 if (!saveq->context2)
2013 lpfc_printf_log(phba,
2014 KERN_ERR,
2015 LOG_SLI,
2016 "0341 Ring %d Cannot find buffer for "
2017 "an unsolicited iocb. tag 0x%x\n",
2018 pring->ringno,
2019 irsp->un.ulpWord[3]);
76bb24ef
JS
2020 }
2021 if (irsp->ulpBdeCount == 2) {
2022 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2023 irsp->unsli3.sli3Words[7]);
2024 if (!saveq->context3)
2025 lpfc_printf_log(phba,
2026 KERN_ERR,
2027 LOG_SLI,
2028 "0342 Ring %d Cannot find buffer for an"
2029 " unsolicited iocb. tag 0x%x\n",
2030 pring->ringno,
2031 irsp->unsli3.sli3Words[7]);
2032 }
2033 list_for_each_entry(iocbq, &saveq->list, list) {
76bb24ef 2034 irsp = &(iocbq->iocb);
76bb24ef
JS
2035 if (irsp->ulpBdeCount != 0) {
2036 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2037 irsp->un.ulpWord[3]);
9c2face6 2038 if (!iocbq->context2)
76bb24ef
JS
2039 lpfc_printf_log(phba,
2040 KERN_ERR,
2041 LOG_SLI,
2042 "0343 Ring %d Cannot find "
2043 "buffer for an unsolicited iocb"
2044 ". tag 0x%x\n", pring->ringno,
92d7f7b0 2045 irsp->un.ulpWord[3]);
76bb24ef
JS
2046 }
2047 if (irsp->ulpBdeCount == 2) {
2048 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
51ef4c26 2049 irsp->unsli3.sli3Words[7]);
9c2face6 2050 if (!iocbq->context3)
76bb24ef
JS
2051 lpfc_printf_log(phba,
2052 KERN_ERR,
2053 LOG_SLI,
2054 "0344 Ring %d Cannot find "
2055 "buffer for an unsolicited "
2056 "iocb. tag 0x%x\n",
2057 pring->ringno,
2058 irsp->unsli3.sli3Words[7]);
2059 }
2060 }
92d7f7b0 2061 }
9c2face6
JS
2062 if (irsp->ulpBdeCount != 0 &&
2063 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2064 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2065 int found = 0;
2066
2067 /* search continue save q for same XRI */
2068 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2069 if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
2070 list_add_tail(&saveq->list, &iocbq->list);
2071 found = 1;
2072 break;
2073 }
2074 }
2075 if (!found)
2076 list_add_tail(&saveq->clist,
2077 &pring->iocb_continue_saveq);
2078 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2079 list_del_init(&iocbq->clist);
2080 saveq = iocbq;
2081 irsp = &(saveq->iocb);
2082 } else
2083 return 0;
2084 }
2085 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2086 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2087 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
6a9c52cf
JS
2088 Rctl = FC_RCTL_ELS_REQ;
2089 Type = FC_TYPE_ELS;
9c2face6
JS
2090 } else {
2091 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2092 Rctl = w5p->hcsw.Rctl;
2093 Type = w5p->hcsw.Type;
2094
2095 /* Firmware Workaround */
2096 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2097 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2098 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
6a9c52cf
JS
2099 Rctl = FC_RCTL_ELS_REQ;
2100 Type = FC_TYPE_ELS;
9c2face6
JS
2101 w5p->hcsw.Rctl = Rctl;
2102 w5p->hcsw.Type = Type;
2103 }
2104 }
92d7f7b0 2105
3772a991 2106 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
92d7f7b0 2107 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 2108 "0313 Ring %d handler: unexpected Rctl x%x "
92d7f7b0 2109 "Type x%x received\n",
e8b62011 2110 pring->ringno, Rctl, Type);
3772a991 2111
92d7f7b0 2112 return 1;
dea3101e
JB
2113}
2114
e59058c4 2115/**
3621a710 2116 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
e59058c4
JS
2117 * @phba: Pointer to HBA context object.
2118 * @pring: Pointer to driver SLI ring object.
2119 * @prspiocb: Pointer to response iocb object.
2120 *
2121 * This function looks up the iocb_lookup table to get the command iocb
2122 * corresponding to the given response iocb using the iotag of the
2123 * response iocb. This function is called with the hbalock held.
2124 * This function returns the command iocb object if it finds the command
2125 * iocb else returns NULL.
2126 **/
dea3101e 2127static struct lpfc_iocbq *
2e0fef85
JS
2128lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2129 struct lpfc_sli_ring *pring,
2130 struct lpfc_iocbq *prspiocb)
dea3101e 2131{
dea3101e
JB
2132 struct lpfc_iocbq *cmd_iocb = NULL;
2133 uint16_t iotag;
2134
604a3e30
JB
2135 iotag = prspiocb->iocb.ulpIoTag;
2136
2137 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2138 cmd_iocb = phba->sli.iocbq_lookup[iotag];
92d7f7b0 2139 list_del_init(&cmd_iocb->list);
604a3e30
JB
2140 pring->txcmplq_cnt--;
2141 return cmd_iocb;
dea3101e
JB
2142 }
2143
dea3101e 2144 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 2145 "0317 iotag x%x is out off "
604a3e30 2146 "range: max iotag x%x wd0 x%x\n",
e8b62011 2147 iotag, phba->sli.last_iotag,
604a3e30 2148 *(((uint32_t *) &prspiocb->iocb) + 7));
dea3101e
JB
2149 return NULL;
2150}
2151
3772a991
JS
2152/**
2153 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2154 * @phba: Pointer to HBA context object.
2155 * @pring: Pointer to driver SLI ring object.
2156 * @iotag: IOCB tag.
2157 *
2158 * This function looks up the iocb_lookup table to get the command iocb
2159 * corresponding to the given iotag. This function is called with the
2160 * hbalock held.
2161 * This function returns the command iocb object if it finds the command
2162 * iocb else returns NULL.
2163 **/
2164static struct lpfc_iocbq *
2165lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2166 struct lpfc_sli_ring *pring, uint16_t iotag)
2167{
2168 struct lpfc_iocbq *cmd_iocb;
2169
2170 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2171 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2172 list_del_init(&cmd_iocb->list);
2173 pring->txcmplq_cnt--;
2174 return cmd_iocb;
2175 }
2176
2177 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2178 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2179 iotag, phba->sli.last_iotag);
2180 return NULL;
2181}
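
/*
 * Conceptual sketch (invented names) of the invariant both lookup routines
 * above depend on: phba->sli.iocbq_lookup[] is a flat array indexed by
 * iotag, populated when the command iocb is issued, with last_iotag as the
 * high-water mark. The real allocator, lpfc_sli_next_iotag(), also grows
 * the array on demand.
 */
#if 0
static void example_record_iotag(struct lpfc_sli *psli,
				 struct lpfc_iocbq *iocbq, uint16_t iotag)
{
	psli->iocbq_lookup[iotag] = iocbq;	/* response side indexes this */
	iocbq->iotag = iotag;
}
#endif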
2182
e59058c4 2183/**
3621a710 2184 * lpfc_sli_process_sol_iocb - process solicited iocb completion
e59058c4
JS
2185 * @phba: Pointer to HBA context object.
2186 * @pring: Pointer to driver SLI ring object.
2187 * @saveq: Pointer to the response iocb to be processed.
2188 *
2189 * This function is called by the ring event handler for non-fcp
2190 * rings when there is a new response iocb in the response ring.
2191 * The caller is not required to hold any locks. This function
2192 * gets the command iocb associated with the response iocb and
2193 * calls the completion handler for the command iocb. If there
2194 * is no completion handler, the function will free the resources
2195 * associated with command iocb. If the response iocb is for
2196 * an already aborted command iocb, the status of the completion
2197 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2198 * This function always returns 1.
2199 **/
dea3101e 2200static int
2e0fef85 2201lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea3101e
JB
2202 struct lpfc_iocbq *saveq)
2203{
2e0fef85 2204 struct lpfc_iocbq *cmdiocbp;
dea3101e
JB
2205 int rc = 1;
2206 unsigned long iflag;
2207
2208 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2e0fef85 2209 spin_lock_irqsave(&phba->hbalock, iflag);
604a3e30 2210 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2e0fef85
JS
2211 spin_unlock_irqrestore(&phba->hbalock, iflag);
2212
dea3101e
JB
2213 if (cmdiocbp) {
2214 if (cmdiocbp->iocb_cmpl) {
ea2151b4
JS
2215 /*
2216 * If an ELS command failed send an event to mgmt
2217 * application.
2218 */
2219 if (saveq->iocb.ulpStatus &&
2220 (pring->ringno == LPFC_ELS_RING) &&
2221 (cmdiocbp->iocb.ulpCommand ==
2222 CMD_ELS_REQUEST64_CR))
2223 lpfc_send_els_failure_event(phba,
2224 cmdiocbp, saveq);
2225
dea3101e
JB
2226 /*
2227 * Post all ELS completions to the worker thread.
2228 * All other are passed to the completion callback.
2229 */
2230 if (pring->ringno == LPFC_ELS_RING) {
07951076
JS
2231 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
2232 cmdiocbp->iocb_flag &=
2233 ~LPFC_DRIVER_ABORTED;
2234 saveq->iocb.ulpStatus =
2235 IOSTAT_LOCAL_REJECT;
2236 saveq->iocb.un.ulpWord[4] =
2237 IOERR_SLI_ABORTED;
0ff10d46
JS
2238
2239 /* Firmware could still be in progress
2240 * of DMAing payload, so don't free data
2241 * buffer till after a hbeat.
2242 */
2243 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
07951076 2244 }
dea3101e 2245 }
2e0fef85 2246 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
604a3e30
JB
2247 } else
2248 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea3101e
JB
2249 } else {
2250 /*
2251 * Unknown initiating command based on the response iotag.
2252 * This could be the case on the ELS ring because of
2253 * lpfc_els_abort().
2254 */
2255 if (pring->ringno != LPFC_ELS_RING) {
2256 /*
2257 * Ring <ringno> handler: unexpected completion IoTag
2258 * <IoTag>
2259 */
a257bf90 2260 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011
JS
2261 "0322 Ring %d handler: "
2262 "unexpected completion IoTag x%x "
2263 "Data: x%x x%x x%x x%x\n",
2264 pring->ringno,
2265 saveq->iocb.ulpIoTag,
2266 saveq->iocb.ulpStatus,
2267 saveq->iocb.un.ulpWord[4],
2268 saveq->iocb.ulpCommand,
2269 saveq->iocb.ulpContext);
dea3101e
JB
2270 }
2271 }
68876920 2272
dea3101e
JB
2273 return rc;
2274}
2275
e59058c4 2276/**
3621a710 2277 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
e59058c4
JS
2278 * @phba: Pointer to HBA context object.
2279 * @pring: Pointer to driver SLI ring object.
2280 *
2281 * This function is called from the iocb ring event handlers when
2282 * put pointer is ahead of the get pointer for a ring. This function signals
2283 * an error attention condition to the worker thread and the worker
2284 * thread will transition the HBA to offline state.
2285 **/
2e0fef85
JS
2286static void
2287lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
875fbdfe 2288{
34b02dcd 2289 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
875fbdfe 2290 /*
025dfdaf 2291 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
875fbdfe
JSEC
2292 * rsp ring <portRspMax>
2293 */
2294 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 2295 "0312 Ring %d handler: portRspPut %d "
025dfdaf 2296 "is bigger than rsp ring %d\n",
e8b62011 2297 pring->ringno, le32_to_cpu(pgp->rspPutInx),
875fbdfe
JSEC
2298 pring->numRiocb);
2299
2e0fef85 2300 phba->link_state = LPFC_HBA_ERROR;
875fbdfe
JSEC
2301
2302 /*
2303 * All error attention handlers are posted to
2304 * worker thread
2305 */
2306 phba->work_ha |= HA_ERATT;
2307 phba->work_hs = HS_FFER3;
92d7f7b0 2308
5e9d9b82 2309 lpfc_worker_wake_up(phba);
875fbdfe
JSEC
2310
2311 return;
2312}
2313
9399627f 2314/**
3621a710 2315 * lpfc_poll_eratt - Error attention polling timer timeout handler
9399627f
JS
2316 * @ptr: Pointer to address of HBA context object.
2317 *
2318 * This function is invoked by the Error Attention polling timer when the
2319 * timer times out. It will check the SLI Error Attention register for
2320 * possible attention events. If so, it will post an Error Attention event
2321 * and wake up worker thread to process it. Otherwise, it will set up the
2322 * Error Attention polling timer for the next poll.
2323 **/
2324void lpfc_poll_eratt(unsigned long ptr)
2325{
2326 struct lpfc_hba *phba;
2327 uint32_t eratt = 0;
2328
2329 phba = (struct lpfc_hba *)ptr;
2330
2331 /* Check chip HA register for error event */
2332 eratt = lpfc_sli_check_eratt(phba);
2333
2334 if (eratt)
2335 /* Tell the worker thread there is work to do */
2336 lpfc_worker_wake_up(phba);
2337 else
2338 /* Restart the timer for next eratt poll */
2339 mod_timer(&phba->eratt_poll, jiffies +
2340 HZ * LPFC_ERATT_POLL_INTERVAL);
2341 return;
2342}
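
/*
 * Sketch of how this polling timer would be armed at initialization time
 * with the 2.6.3x timer API; the real setup lives in the driver init path
 * and the function name here is invented.
 */
#if 0
static void lpfc_example_start_eratt_poll(struct lpfc_hba *phba)
{
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long)phba;
	mod_timer(&phba->eratt_poll,
		  jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
}
#endif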
2343
875fbdfe 2344
e59058c4 2345/**
3621a710 2346 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
e59058c4
JS
2347 * @phba: Pointer to HBA context object.
2348 * @pring: Pointer to driver SLI ring object.
2349 * @mask: Host attention register mask for this ring.
2350 *
2351 * This function is called from the interrupt context when there is a ring
2352 * event for the fcp ring. The caller does not hold any lock.
2353 * The function processes each response iocb in the response ring until it
2354 * finds an iocb with the LE bit set and chains all the iocbs up to the iocb with
2355 * LE bit set. The function will call the completion handler of the command iocb
2356 * if the response iocb indicates a completion for a command iocb or it is
2357 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2358 * function if this is an unsolicited iocb.
dea3101e 2359 * This routine presumes LPFC_FCP_RING handling and doesn't bother
45ed1190
JS
2360 * to check it explicitly.
2361 */
2362int
2e0fef85
JS
2363lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2364 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 2365{
34b02dcd 2366 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea3101e 2367 IOCB_t *irsp = NULL;
87f6eaff 2368 IOCB_t *entry = NULL;
dea3101e
JB
2369 struct lpfc_iocbq *cmdiocbq = NULL;
2370 struct lpfc_iocbq rspiocbq;
dea3101e
JB
2371 uint32_t status;
2372 uint32_t portRspPut, portRspMax;
2373 int rc = 1;
2374 lpfc_iocb_type type;
2375 unsigned long iflag;
2376 uint32_t rsp_cmpl = 0;
dea3101e 2377
2e0fef85 2378 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e
JB
2379 pring->stats.iocb_event++;
2380
dea3101e
JB
2381 /*
2382 * The next available response entry should never exceed the maximum
2383 * entries. If it does, treat it as an adapter hardware error.
2384 */
2385 portRspMax = pring->numRiocb;
2386 portRspPut = le32_to_cpu(pgp->rspPutInx);
2387 if (unlikely(portRspPut >= portRspMax)) {
875fbdfe 2388 lpfc_sli_rsp_pointers_error(phba, pring);
2e0fef85 2389 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e
JB
2390 return 1;
2391 }
45ed1190
JS
2392 if (phba->fcp_ring_in_use) {
2393 spin_unlock_irqrestore(&phba->hbalock, iflag);
2394 return 1;
2395 } else
2396 phba->fcp_ring_in_use = 1;
dea3101e
JB
2397
2398 rmb();
2399 while (pring->rspidx != portRspPut) {
87f6eaff
JSEC
2400 /*
2401 * Fetch an entry off the ring and copy it into a local data
2402 * structure. The copy involves a byte-swap since the
2403 * network byte order and pci byte orders are different.
2404 */
ed957684 2405 entry = lpfc_resp_iocb(phba, pring);
858c9f6c 2406 phba->last_completion_time = jiffies;
875fbdfe
JSEC
2407
2408 if (++pring->rspidx >= portRspMax)
2409 pring->rspidx = 0;
2410
87f6eaff
JSEC
2411 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2412 (uint32_t *) &rspiocbq.iocb,
ed957684 2413 phba->iocb_rsp_size);
a4bc3379 2414 INIT_LIST_HEAD(&(rspiocbq.list));
87f6eaff
JSEC
2415 irsp = &rspiocbq.iocb;
2416
dea3101e
JB
2417 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2418 pring->stats.iocb_rsp++;
2419 rsp_cmpl++;
2420
2421 if (unlikely(irsp->ulpStatus)) {
92d7f7b0
JS
2422 /*
2423 * If resource errors reported from HBA, reduce
2424 * queuedepths of the SCSI device.
2425 */
2426 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2427 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2428 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 2429 phba->lpfc_rampdown_queue_depth(phba);
92d7f7b0
JS
2430 spin_lock_irqsave(&phba->hbalock, iflag);
2431 }
2432
dea3101e
JB
2433 /* Rsp ring <ringno> error: IOCB */
2434 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
e8b62011 2435 "0336 Rsp Ring %d error: IOCB Data: "
92d7f7b0 2436 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
e8b62011 2437 pring->ringno,
92d7f7b0
JS
2438 irsp->un.ulpWord[0],
2439 irsp->un.ulpWord[1],
2440 irsp->un.ulpWord[2],
2441 irsp->un.ulpWord[3],
2442 irsp->un.ulpWord[4],
2443 irsp->un.ulpWord[5],
d7c255b2
JS
2444 *(uint32_t *)&irsp->un1,
2445 *((uint32_t *)&irsp->un1 + 1));
dea3101e
JB
2446 }
2447
2448 switch (type) {
2449 case LPFC_ABORT_IOCB:
2450 case LPFC_SOL_IOCB:
2451 /*
2452 * Idle exchange closed via ABTS from port. No iocb
2453 * resources need to be recovered.
2454 */
2455 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
dca9479b 2456 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 2457 "0333 IOCB cmd 0x%x"
dca9479b 2458 " processed. Skipping"
92d7f7b0 2459 " completion\n",
dca9479b 2460 irsp->ulpCommand);
dea3101e
JB
2461 break;
2462 }
2463
604a3e30
JB
2464 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2465 &rspiocbq);
dea3101e 2466 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
2e0fef85
JS
2467 spin_unlock_irqrestore(&phba->hbalock,
2468 iflag);
b808608b
JW
2469 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2470 &rspiocbq);
2e0fef85 2471 spin_lock_irqsave(&phba->hbalock,
b808608b
JW
2472 iflag);
2473 }
dea3101e 2474 break;
a4bc3379 2475 case LPFC_UNSOL_IOCB:
2e0fef85 2476 spin_unlock_irqrestore(&phba->hbalock, iflag);
a4bc3379 2477 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
2e0fef85 2478 spin_lock_irqsave(&phba->hbalock, iflag);
a4bc3379 2479 break;
dea3101e
JB
2480 default:
2481 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2482 char adaptermsg[LPFC_MAX_ADPTMSG];
2483 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2484 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2485 MAX_MSG_DATA);
898eb71c
JP
2486 dev_warn(&((phba->pcidev)->dev),
2487 "lpfc%d: %s\n",
dea3101e
JB
2488 phba->brd_no, adaptermsg);
2489 } else {
2490 /* Unknown IOCB command */
2491 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 2492 "0334 Unknown IOCB command "
92d7f7b0 2493 "Data: x%x, x%x x%x x%x x%x\n",
e8b62011 2494 type, irsp->ulpCommand,
92d7f7b0
JS
2495 irsp->ulpStatus,
2496 irsp->ulpIoTag,
2497 irsp->ulpContext);
dea3101e
JB
2498 }
2499 break;
2500 }
2501
2502 /*
2503 * The response IOCB has been processed. Update the ring
2504 * pointer in SLIM. If the port response put pointer has not
2505 * been updated, sync the pgp->rspPutInx and fetch the new port
2506 * response put pointer.
2507 */
ed957684 2508 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
dea3101e
JB
2509
2510 if (pring->rspidx == portRspPut)
2511 portRspPut = le32_to_cpu(pgp->rspPutInx);
2512 }
2513
2514 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
2515 pring->stats.iocb_rsp_full++;
2516 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2517 writel(status, phba->CAregaddr);
2518 readl(phba->CAregaddr);
2519 }
2520 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2521 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2522 pring->stats.iocb_cmd_empty++;
2523
2524 /* Force update of the local copy of cmdGetInx */
2525 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2526 lpfc_sli_resume_iocb(phba, pring);
2527
2528 if ((pring->lpfc_sli_cmd_available))
2529 (pring->lpfc_sli_cmd_available) (phba, pring);
2530
2531 }
2532
45ed1190 2533 phba->fcp_ring_in_use = 0;
2e0fef85 2534 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e
JB
2535 return rc;
2536}
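
/*
 * The loop above consumes the response ring circularly: the driver's get
 * index (pring->rspidx) chases the port's put index and wraps at the ring
 * size. A minimal standalone sketch of the same wrap arithmetic:
 */
#if 0
static uint32_t example_ring_advance(uint32_t idx, uint32_t ring_size)
{
	if (++idx >= ring_size)
		idx = 0;	/* wrap, exactly like pring->rspidx above */
	return idx;
}
#endif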
2537
e59058c4 2538/**
3772a991
JS
2539 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
2540 * @phba: Pointer to HBA context object.
2541 * @pring: Pointer to driver SLI ring object.
2542 * @rspiocbp: Pointer to driver response IOCB object.
2543 *
2544 * This function is called from the worker thread when there is a slow-path
2545 * response IOCB to process. This function chains all the response iocbs until
2546 * seeing the iocb with the LE bit set. The function will call
2547 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
2548 * completion of a command iocb. The function will call the
2549 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2550 * The function frees the resources or calls the completion handler if this
2551 * iocb is an abort completion. The function returns NULL when the response
2552 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
2553 * this function shall chain the iocb on to the iocb_continueq and return the
2554 * response iocb passed in.
2555 **/
2556static struct lpfc_iocbq *
2557lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2558 struct lpfc_iocbq *rspiocbp)
2559{
2560 struct lpfc_iocbq *saveq;
2561 struct lpfc_iocbq *cmdiocbp;
2562 struct lpfc_iocbq *next_iocb;
2563 IOCB_t *irsp = NULL;
2564 uint32_t free_saveq;
2565 uint8_t iocb_cmd_type;
2566 lpfc_iocb_type type;
2567 unsigned long iflag;
2568 int rc;
2569
2570 spin_lock_irqsave(&phba->hbalock, iflag);
2571 /* First add the response iocb to the continueq list */
2572 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
2573 pring->iocb_continueq_cnt++;
2574
2575 /* Now, determine whether the list is complete for processing */
2576 irsp = &rspiocbp->iocb;
2577 if (irsp->ulpLe) {
2578 /*
2579 * By default, the driver expects to free all resources
2580 * associated with this iocb completion.
2581 */
2582 free_saveq = 1;
2583 saveq = list_get_first(&pring->iocb_continueq,
2584 struct lpfc_iocbq, list);
2585 irsp = &(saveq->iocb);
2586 list_del_init(&pring->iocb_continueq);
2587 pring->iocb_continueq_cnt = 0;
2588
2589 pring->stats.iocb_rsp++;
2590
2591 /*
2592 * If resource errors reported from HBA, reduce
2593 * queuedepths of the SCSI device.
2594 */
2595 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2596 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2597 spin_unlock_irqrestore(&phba->hbalock, iflag);
2598 phba->lpfc_rampdown_queue_depth(phba);
2599 spin_lock_irqsave(&phba->hbalock, iflag);
2600 }
2601
2602 if (irsp->ulpStatus) {
2603 /* Rsp ring <ringno> error: IOCB */
2604 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2605 "0328 Rsp Ring %d error: "
2606 "IOCB Data: "
2607 "x%x x%x x%x x%x "
2608 "x%x x%x x%x x%x "
2609 "x%x x%x x%x x%x "
2610 "x%x x%x x%x x%x\n",
2611 pring->ringno,
2612 irsp->un.ulpWord[0],
2613 irsp->un.ulpWord[1],
2614 irsp->un.ulpWord[2],
2615 irsp->un.ulpWord[3],
2616 irsp->un.ulpWord[4],
2617 irsp->un.ulpWord[5],
2618 *(((uint32_t *) irsp) + 6),
2619 *(((uint32_t *) irsp) + 7),
2620 *(((uint32_t *) irsp) + 8),
2621 *(((uint32_t *) irsp) + 9),
2622 *(((uint32_t *) irsp) + 10),
2623 *(((uint32_t *) irsp) + 11),
2624 *(((uint32_t *) irsp) + 12),
2625 *(((uint32_t *) irsp) + 13),
2626 *(((uint32_t *) irsp) + 14),
2627 *(((uint32_t *) irsp) + 15));
2628 }
2629
2630 /*
2631 * Fetch the IOCB command type and call the correct completion
2632 * routine. Solicited and Unsolicited IOCBs on the ELS ring
2633 * get freed back to the lpfc_iocb_list by the discovery
2634 * kernel thread.
2635 */
2636 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2637 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2638 switch (type) {
2639 case LPFC_SOL_IOCB:
2640 spin_unlock_irqrestore(&phba->hbalock, iflag);
2641 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
2642 spin_lock_irqsave(&phba->hbalock, iflag);
2643 break;
2644
2645 case LPFC_UNSOL_IOCB:
2646 spin_unlock_irqrestore(&phba->hbalock, iflag);
2647 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
2648 spin_lock_irqsave(&phba->hbalock, iflag);
2649 if (!rc)
2650 free_saveq = 0;
2651 break;
2652
2653 case LPFC_ABORT_IOCB:
2654 cmdiocbp = NULL;
2655 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
2656 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
2657 saveq);
2658 if (cmdiocbp) {
2659 /* Call the specified completion routine */
2660 if (cmdiocbp->iocb_cmpl) {
2661 spin_unlock_irqrestore(&phba->hbalock,
2662 iflag);
2663 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
2664 saveq);
2665 spin_lock_irqsave(&phba->hbalock,
2666 iflag);
2667 } else
2668 __lpfc_sli_release_iocbq(phba,
2669 cmdiocbp);
2670 }
2671 break;
2672
2673 case LPFC_UNKNOWN_IOCB:
2674 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2675 char adaptermsg[LPFC_MAX_ADPTMSG];
2676 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2677 memcpy(&adaptermsg[0], (uint8_t *)irsp,
2678 MAX_MSG_DATA);
2679 dev_warn(&((phba->pcidev)->dev),
2680 "lpfc%d: %s\n",
2681 phba->brd_no, adaptermsg);
2682 } else {
2683 /* Unknown IOCB command */
2684 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2685 "0335 Unknown IOCB "
2686 "command Data: x%x "
2687 "x%x x%x x%x\n",
2688 irsp->ulpCommand,
2689 irsp->ulpStatus,
2690 irsp->ulpIoTag,
2691 irsp->ulpContext);
2692 }
2693 break;
2694 }
2695
2696 if (free_saveq) {
2697 list_for_each_entry_safe(rspiocbp, next_iocb,
2698 &saveq->list, list) {
2699 list_del(&rspiocbp->list);
2700 __lpfc_sli_release_iocbq(phba, rspiocbp);
2701 }
2702 __lpfc_sli_release_iocbq(phba, saveq);
2703 }
2704 rspiocbp = NULL;
2705 }
2706 spin_unlock_irqrestore(&phba->hbalock, iflag);
2707 return rspiocbp;
2708}
2709
2710/**
2711 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
e59058c4
JS
2712 * @phba: Pointer to HBA context object.
2713 * @pring: Pointer to driver SLI ring object.
2714 * @mask: Host attention register mask for this ring.
2715 *
3772a991
JS
2716 * This routine invokes the actual slow_ring event handler through the
2717 * API jump table function pointer in the lpfc_hba struct.
e59058c4 2718 **/
3772a991 2719void
2e0fef85
JS
2720lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2721 struct lpfc_sli_ring *pring, uint32_t mask)
3772a991
JS
2722{
2723 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
2724}
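
/*
 * Hedged sketch of the per-revision jump-table wiring the wrapper above
 * relies on; the real assignments are made in the driver's SLI API setup
 * code, and the function name here is invented.
 */
#if 0
static void lpfc_example_setup_slow_ring_api(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
	else
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
}
#endif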
2725
2726/**
2727 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
2728 * @phba: Pointer to HBA context object.
2729 * @pring: Pointer to driver SLI ring object.
2730 * @mask: Host attention register mask for this ring.
2731 *
2732 * This function is called from the worker thread when there is a ring event
2733 * for non-fcp rings. The caller does not hold any lock. The function will
2734 * remove each response iocb in the response ring and calls the handle
2735 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2736 **/
2737static void
2738lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
2739 struct lpfc_sli_ring *pring, uint32_t mask)
dea3101e 2740{
34b02dcd 2741 struct lpfc_pgp *pgp;
dea3101e
JB
2742 IOCB_t *entry;
2743 IOCB_t *irsp = NULL;
2744 struct lpfc_iocbq *rspiocbp = NULL;
dea3101e 2745 uint32_t portRspPut, portRspMax;
dea3101e 2746 unsigned long iflag;
3772a991 2747 uint32_t status;
dea3101e 2748
34b02dcd 2749 pgp = &phba->port_gp[pring->ringno];
2e0fef85 2750 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e
JB
2751 pring->stats.iocb_event++;
2752
dea3101e
JB
2753 /*
2754 * The next available response entry should never exceed the maximum
2755 * entries. If it does, treat it as an adapter hardware error.
2756 */
2757 portRspMax = pring->numRiocb;
2758 portRspPut = le32_to_cpu(pgp->rspPutInx);
2759 if (portRspPut >= portRspMax) {
2760 /*
025dfdaf 2761 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea3101e
JB
2762 * rsp ring <portRspMax>
2763 */
ed957684 2764 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
e8b62011 2765 "0303 Ring %d handler: portRspPut %d "
025dfdaf 2766 "is bigger than rsp ring %d\n",
e8b62011 2767 pring->ringno, portRspPut, portRspMax);
dea3101e 2768
2e0fef85
JS
2769 phba->link_state = LPFC_HBA_ERROR;
2770 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea3101e
JB
2771
2772 phba->work_hs = HS_FFER3;
2773 lpfc_handle_eratt(phba);
2774
3772a991 2775 return;
dea3101e
JB
2776 }
2777
2778 rmb();
dea3101e
JB
2779 while (pring->rspidx != portRspPut) {
2780 /*
2781 * Build a completion list and call the appropriate handler.
2782 * The process is to get the next available response iocb, get
2783 * a free iocb from the list, copy the response data into the
2784 * free iocb, insert to the continuation list, and update the
2785 * next response index to slim. This process makes response
2786 * iocbs in the ring available to DMA as fast as possible but
2787 * pays a penalty for a copy operation. Since the iocb is
2788 * only 32 bytes, this penalty is considered small relative to
2789 * the PCI reads for register values and a slim write. When
2790 * the ulpLe field is set, the entire Command has been
2791 * received.
2792 */
ed957684
JS
2793 entry = lpfc_resp_iocb(phba, pring);
2794
858c9f6c 2795 phba->last_completion_time = jiffies;
2e0fef85 2796 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea3101e
JB
2797 if (rspiocbp == NULL) {
2798 printk(KERN_ERR "%s: out of buffers! Failing "
cadbd4a5 2799 "completion.\n", __func__);
dea3101e
JB
2800 break;
2801 }
2802
ed957684
JS
2803 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
2804 phba->iocb_rsp_size);
dea3101e
JB
2805 irsp = &rspiocbp->iocb;
2806
2807 if (++pring->rspidx >= portRspMax)
2808 pring->rspidx = 0;
2809
a58cbd52
JS
2810 if (pring->ringno == LPFC_ELS_RING) {
2811 lpfc_debugfs_slow_ring_trc(phba,
2812 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
2813 *(((uint32_t *) irsp) + 4),
2814 *(((uint32_t *) irsp) + 6),
2815 *(((uint32_t *) irsp) + 7));
2816 }
2817
ed957684 2818 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
dea3101e 2819
3772a991
JS
2820 spin_unlock_irqrestore(&phba->hbalock, iflag);
2821 /* Handle the response IOCB */
2822 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
2823 spin_lock_irqsave(&phba->hbalock, iflag);
dea3101e
JB
2824
2825 /*
2826 * If the port response put pointer has not been updated, sync
2827 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
2828 * response put pointer.
2829 */
2830 if (pring->rspidx == portRspPut) {
2831 portRspPut = le32_to_cpu(pgp->rspPutInx);
2832 }
2833 } /* while (pring->rspidx != portRspPut) */
2834
92d7f7b0 2835 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea3101e
JB
2836 /* At least one response entry has been freed */
2837 pring->stats.iocb_rsp_full++;
2838 /* SET RxRE_RSP in Chip Att register */
2839 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2840 writel(status, phba->CAregaddr);
2841 readl(phba->CAregaddr); /* flush */
2842 }
2843 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2844 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2845 pring->stats.iocb_cmd_empty++;
2846
2847 /* Force update of the local copy of cmdGetInx */
2848 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2849 lpfc_sli_resume_iocb(phba, pring);
2850
2851 if ((pring->lpfc_sli_cmd_available))
2852 (pring->lpfc_sli_cmd_available) (phba, pring);
2853
2854 }
2855
2e0fef85 2856 spin_unlock_irqrestore(&phba->hbalock, iflag);
3772a991 2857 return;
dea3101e
JB
2858}
2859
4f774513
JS
2860/**
2861 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
2862 * @phba: Pointer to HBA context object.
2863 * @pring: Pointer to driver SLI ring object.
2864 * @mask: Host attention register mask for this ring.
2865 *
2866 * This function is called from the worker thread when there is a pending
2867 * ELS response iocb on the driver internal slow-path response iocb worker
2868 * queue. The caller does not hold any lock. The function will remove each
2869 * response iocb from the response worker queue and calls the handle
2870 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2871 **/
2872static void
2873lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
2874 struct lpfc_sli_ring *pring, uint32_t mask)
2875{
2876 struct lpfc_iocbq *irspiocbq;
4d9ab994
JS
2877 struct hbq_dmabuf *dmabuf;
2878 struct lpfc_cq_event *cq_event;
4f774513
JS
2879 unsigned long iflag;
2880
45ed1190
JS
2881 spin_lock_irqsave(&phba->hbalock, iflag);
2882 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
2883 spin_unlock_irqrestore(&phba->hbalock, iflag);
2884 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4f774513
JS
2885 /* Get the response iocb from the head of work queue */
2886 spin_lock_irqsave(&phba->hbalock, iflag);
45ed1190 2887 list_remove_head(&phba->sli4_hba.sp_queue_event,
4d9ab994 2888 cq_event, struct lpfc_cq_event, list);
4f774513 2889 spin_unlock_irqrestore(&phba->hbalock, iflag);
4d9ab994
JS
2890
2891 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
2892 case CQE_CODE_COMPL_WQE:
2893 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
2894 cq_event);
45ed1190
JS
2895 /* Translate ELS WCQE to response IOCBQ */
2896 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
2897 irspiocbq);
2898 if (irspiocbq)
2899 lpfc_sli_sp_handle_rspiocb(phba, pring,
2900 irspiocbq);
4d9ab994
JS
2901 break;
2902 case CQE_CODE_RECEIVE:
2903 dmabuf = container_of(cq_event, struct hbq_dmabuf,
2904 cq_event);
2905 lpfc_sli4_handle_received_buffer(phba, dmabuf);
2906 break;
2907 default:
2908 break;
2909 }
4f774513
JS
2910 }
2911}
2912
e59058c4 2913/**
3621a710 2914 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
e59058c4
JS
2915 * @phba: Pointer to HBA context object.
2916 * @pring: Pointer to driver SLI ring object.
2917 *
2918 * This function aborts all iocbs in the given ring and frees all the iocb
2919 * objects in txq. This function issues an abort iocb for all the iocb commands
2920 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
2921 * the return of this function. The caller is not required to hold any locks.
2922 **/
2e0fef85 2923void
dea3101e
JB
2924lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2925{
2534ba75 2926 LIST_HEAD(completions);
dea3101e 2927 struct lpfc_iocbq *iocb, *next_iocb;
dea3101e 2928
92d7f7b0
JS
2929 if (pring->ringno == LPFC_ELS_RING) {
2930 lpfc_fabric_abort_hba(phba);
2931 }
2932
dea3101e
JB
2933 /* Error everything on txq and txcmplq
2934 * First do the txq.
2935 */
2e0fef85 2936 spin_lock_irq(&phba->hbalock);
2534ba75 2937 list_splice_init(&pring->txq, &completions);
dea3101e 2938 pring->txq_cnt = 0;
dea3101e
JB
2939
2940 /* Next issue ABTS for everything on the txcmplq */
2534ba75
JS
2941 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
2942 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
dea3101e 2943
2e0fef85 2944 spin_unlock_irq(&phba->hbalock);
dea3101e 2945
a257bf90
JS
2946 /* Cancel all the IOCBs from the completions list */
2947 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
2948 IOERR_SLI_ABORTED);
dea3101e
JB
2949}
2950
a8e497d5 2951/**
3621a710 2952 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
a8e497d5
JS
2953 * @phba: Pointer to HBA context object.
2954 *
2955 * This function flushes all iocbs in the fcp ring and frees all the iocb
2956 * objects in txq and txcmplq. This function will not issue abort iocbs
2957 * for the iocb commands in txcmplq; they will just be returned with
2958 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
2959 * slot has been permanently disabled.
2960 **/
2961void
2962lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2963{
2964 LIST_HEAD(txq);
2965 LIST_HEAD(txcmplq);
a8e497d5
JS
2966 struct lpfc_sli *psli = &phba->sli;
2967 struct lpfc_sli_ring *pring;
2968
2969 /* Currently, only one fcp ring */
2970 pring = &psli->ring[psli->fcp_ring];
2971
2972 spin_lock_irq(&phba->hbalock);
2973 /* Retrieve everything on txq */
2974 list_splice_init(&pring->txq, &txq);
2975 pring->txq_cnt = 0;
2976
2977 /* Retrieve everything on the txcmplq */
2978 list_splice_init(&pring->txcmplq, &txcmplq);
2979 pring->txcmplq_cnt = 0;
2980 spin_unlock_irq(&phba->hbalock);
2981
2982 /* Flush the txq */
a257bf90
JS
2983 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
2984 IOERR_SLI_DOWN);
a8e497d5
JS
2985
2986 /* Flush the txcmpq */
a257bf90
JS
2987 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
2988 IOERR_SLI_DOWN);
a8e497d5
JS
2989}
2990
e59058c4 2991/**
3772a991 2992 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
e59058c4
JS
2993 * @phba: Pointer to HBA context object.
2994 * @mask: Bit mask to be checked.
2995 *
2996 * This function reads the host status register and compares it
2997 * with the provided bit mask to check if the HBA completed
2998 * the restart. This function will wait in a loop for the
2999 * HBA to complete the restart. If the HBA does not restart within
3000 * 15 iterations, the function will reset the HBA again. The
3001 * function returns 1 when the HBA fails to restart, otherwise it
3002 * returns zero.
3003 **/
3772a991
JS
3004static int
3005lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea3101e 3006{
41415862
JW
3007 uint32_t status;
3008 int i = 0;
3009 int retval = 0;
dea3101e 3010
41415862
JW
3011 /* Read the HBA Host Status Register */
3012 status = readl(phba->HSregaddr);
dea3101e 3013
41415862
JW
3014 /*
3015 * Check status register every 100ms for 5 retries, then every
3016 * 500ms for 5, then every 2.5 sec for 5, then reset board and
3017 * every 2.5 sec for 4.
3018 * Break out of the loop if errors occurred during init.
3019 */
3020 while (((status & mask) != mask) &&
3021 !(status & HS_FFERM) &&
3022 i++ < 20) {
dea3101e 3023
41415862
JW
3024 if (i <= 5)
3025 msleep(10);
3026 else if (i <= 10)
3027 msleep(500);
3028 else
3029 msleep(2500);
dea3101e 3030
41415862 3031 if (i == 15) {
2e0fef85 3032 /* Do post */
92d7f7b0 3033 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862
JW
3034 lpfc_sli_brdrestart(phba);
3035 }
3036 /* Read the HBA Host Status Register */
3037 status = readl(phba->HSregaddr);
3038 }
dea3101e 3039
41415862
JW
3040 /* Check to see if any errors occurred during init */
3041 if ((status & HS_FFERM) || (i >= 20)) {
2e0fef85 3042 phba->link_state = LPFC_HBA_ERROR;
41415862 3043 retval = 1;
dea3101e 3044 }
dea3101e 3045
41415862
JW
3046 return retval;
3047}
dea3101e 3048
da0436e9
JS
3049/**
3050 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3051 * @phba: Pointer to HBA context object.
3052 * @mask: Bit mask to be checked.
3053 *
3054 * This function checks the host status register to see if the HBA is
3055 * ready. This function will wait in a loop for the HBA to become ready.
3056 * If the HBA is not ready, the function will reset the HBA PCI
3057 * function again. The function returns 1 when the HBA fails to become
3058 * ready, otherwise it returns zero.
3059 **/
3060static int
3061lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3062{
3063 uint32_t status;
3064 int retval = 0;
3065
3066 /* Read the HBA Host Status Register */
3067 status = lpfc_sli4_post_status_check(phba);
3068
3069 if (status) {
3070 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3071 lpfc_sli_brdrestart(phba);
3072 status = lpfc_sli4_post_status_check(phba);
3073 }
3074
3075 /* Check to see if any errors occurred during init */
3076 if (status) {
3077 phba->link_state = LPFC_HBA_ERROR;
3078 retval = 1;
3079 } else
3080 phba->sli4_hba.intr_enable = 0;
3081
3082 return retval;
3083}
3084
3085/**
3086 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3087 * @phba: Pointer to HBA context object.
3088 * @mask: Bit mask to be checked.
3089 *
3090 * This routine invokes the actual SLI3 or SLI4 hba readiness check routine
3091 * through the API jump table function pointer in the lpfc_hba struct.
3092 **/
3093int
3094lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3095{
3096 return phba->lpfc_sli_brdready(phba, mask);
3097}
3098
9290831f
JS
3099#define BARRIER_TEST_PATTERN (0xdeadbeef)
3100
e59058c4 3101/**
3621a710 3102 * lpfc_reset_barrier - Make HBA ready for HBA reset
e59058c4
JS
3103 * @phba: Pointer to HBA context object.
3104 *
3105 * This function is called before resetting an HBA. This
3106 * function requests the HBA to quiesce DMAs before a reset.
3107 **/
2e0fef85 3108void lpfc_reset_barrier(struct lpfc_hba *phba)
9290831f 3109{
65a29c16
JS
3110 uint32_t __iomem *resp_buf;
3111 uint32_t __iomem *mbox_buf;
9290831f
JS
3112 volatile uint32_t mbox;
3113 uint32_t hc_copy;
3114 int i;
3115 uint8_t hdrtype;
3116
3117 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3118 if (hdrtype != 0x80 ||
3119 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3120 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3121 return;
3122
3123 /*
3124 * Tell the other part of the chip to suspend temporarily all
3125 * its DMA activity.
3126 */
65a29c16 3127 resp_buf = phba->MBslimaddr;
9290831f
JS
3128
3129 /* Disable the error attention */
3130 hc_copy = readl(phba->HCregaddr);
3131 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3132 readl(phba->HCregaddr); /* flush */
2e0fef85 3133 phba->link_flag |= LS_IGNORE_ERATT;
9290831f
JS
3134
3135 if (readl(phba->HAregaddr) & HA_ERATT) {
3136 /* Clear Chip error bit */
3137 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3138 phba->pport->stopped = 1;
9290831f
JS
3139 }
3140
3141 mbox = 0;
3142 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3143 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3144
3145 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
65a29c16 3146 mbox_buf = phba->MBslimaddr;
9290831f
JS
3147 writel(mbox, mbox_buf);
3148
3149 for (i = 0;
3150 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
3151 mdelay(1);
3152
3153 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
f4b4c68f 3154 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2e0fef85 3155 phba->pport->stopped)
9290831f
JS
3156 goto restore_hc;
3157 else
3158 goto clear_errat;
3159 }
3160
3161 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
3162 for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
3163 mdelay(1);
3164
3165clear_errat:
3166
3167 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
3168 mdelay(1);
3169
3170 if (readl(phba->HAregaddr) & HA_ERATT) {
3171 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3172 phba->pport->stopped = 1;
9290831f
JS
3173 }
3174
3175restore_hc:
2e0fef85 3176 phba->link_flag &= ~LS_IGNORE_ERATT;
9290831f
JS
3177 writel(hc_copy, phba->HCregaddr);
3178 readl(phba->HCregaddr); /* flush */
3179}
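/*
 * Minimal sketch of the polled-handshake idiom lpfc_reset_barrier uses:
 * write a test pattern, then spin with mdelay() until the chip echoes
 * the expected value or the attempt budget expires.  This helper is
 * illustrative only and not part of the driver.
 */
static bool lpfc_example_poll_for(void __iomem *reg, uint32_t want, int tries)
{
	int i;

	for (i = 0; i < tries && readl(reg) != want; i++)
		mdelay(1);	/* same 1ms granularity as the loops above */
	return readl(reg) == want;
}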
3180
e59058c4 3181/**
3621a710 3182 * lpfc_sli_brdkill - Issue a kill_board mailbox command
e59058c4
JS
3183 * @phba: Pointer to HBA context object.
3184 *
3185 * This function issues a kill_board mailbox command and waits for
3186 * the error attention interrupt. This function is called for stopping
3187 * the firmware processing. The caller is not required to hold any
3188 * locks. This function calls the lpfc_hba_down_post function to free
3189 * any pending commands after the kill. The function returns 1 if it
3190 * fails to kill the board, else it returns 0.
3191 **/
41415862 3192int
2e0fef85 3193lpfc_sli_brdkill(struct lpfc_hba *phba)
41415862
JW
3194{
3195 struct lpfc_sli *psli;
3196 LPFC_MBOXQ_t *pmb;
3197 uint32_t status;
3198 uint32_t ha_copy;
3199 int retval;
3200 int i = 0;
dea3101e 3201
41415862 3202 psli = &phba->sli;
dea3101e 3203
41415862 3204 /* Kill HBA */
ed957684 3205 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011
JS
3206 "0329 Kill HBA Data: x%x x%x\n",
3207 phba->pport->port_state, psli->sli_flag);
41415862 3208
98c9ea5c
JS
3209 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3210 if (!pmb)
41415862 3211 return 1;
41415862
JW
3212
3213 /* Disable the error attention */
2e0fef85 3214 spin_lock_irq(&phba->hbalock);
41415862
JW
3215 status = readl(phba->HCregaddr);
3216 status &= ~HC_ERINT_ENA;
3217 writel(status, phba->HCregaddr);
3218 readl(phba->HCregaddr); /* flush */
2e0fef85
JS
3219 phba->link_flag |= LS_IGNORE_ERATT;
3220 spin_unlock_irq(&phba->hbalock);
41415862
JW
3221
3222 lpfc_kill_board(phba, pmb);
3223 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3224 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3225
3226 if (retval != MBX_SUCCESS) {
3227 if (retval != MBX_BUSY)
3228 mempool_free(pmb, phba->mbox_mem_pool);
2e0fef85
JS
3229 spin_lock_irq(&phba->hbalock);
3230 phba->link_flag &= ~LS_IGNORE_ERATT;
3231 spin_unlock_irq(&phba->hbalock);
41415862
JW
3232 return 1;
3233 }
3234
f4b4c68f
JS
3235 spin_lock_irq(&phba->hbalock);
3236 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3237 spin_unlock_irq(&phba->hbalock);
9290831f 3238
41415862
JW
3239 mempool_free(pmb, phba->mbox_mem_pool);
3240
3241 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3242 * attention every 100ms for 3 seconds. If we don't get ERATT after
3243 * 3 seconds we still set HBA_ERROR state because the status of the
3244 * board is now undefined.
3245 */
3246 ha_copy = readl(phba->HAregaddr);
3247
3248 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3249 mdelay(100);
3250 ha_copy = readl(phba->HAregaddr);
3251 }
3252
3253 del_timer_sync(&psli->mbox_tmo);
9290831f
JS
3254 if (ha_copy & HA_ERATT) {
3255 writel(HA_ERATT, phba->HAregaddr);
2e0fef85 3256 phba->pport->stopped = 1;
9290831f 3257 }
2e0fef85 3258 spin_lock_irq(&phba->hbalock);
41415862 3259 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
04c68496 3260 psli->mbox_active = NULL;
2e0fef85
JS
3261 phba->link_flag &= ~LS_IGNORE_ERATT;
3262 spin_unlock_irq(&phba->hbalock);
41415862 3263
41415862 3264 lpfc_hba_down_post(phba);
2e0fef85 3265 phba->link_state = LPFC_HBA_ERROR;
41415862 3266
2e0fef85 3267 return ha_copy & HA_ERATT ? 0 : 1;
dea3101e
JB
3268}
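/*
 * Sketch of the ERATT-polling idiom used above, pulled out as a helper
 * for clarity (hypothetical name, not part of the driver): poll the
 * host attention register every 100ms for up to 3 seconds and report
 * whether the error attention bit was seen.
 */
static bool lpfc_example_wait_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy = readl(phba->HAregaddr);
	int i = 0;

	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		ha_copy = readl(phba->HAregaddr);
	}
	return (ha_copy & HA_ERATT) != 0;
}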
3269
e59058c4 3270/**
3772a991 3271 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
e59058c4
JS
3272 * @phba: Pointer to HBA context object.
3273 *
3274 * This function resets the HBA by writing HC_INITFF to the control
3275 * register. After the HBA resets, this function resets all the iocb ring
3276 * indices. This function disables PCI layer parity checking during
3277 * the reset.
3278 * This function returns 0 always.
3279 * The caller is not required to hold any locks.
3280 **/
41415862 3281int
2e0fef85 3282lpfc_sli_brdreset(struct lpfc_hba *phba)
dea3101e 3283{
41415862 3284 struct lpfc_sli *psli;
dea3101e 3285 struct lpfc_sli_ring *pring;
41415862 3286 uint16_t cfg_value;
dea3101e 3287 int i;
dea3101e 3288
41415862 3289 psli = &phba->sli;
dea3101e 3290
41415862
JW
3291 /* Reset HBA */
3292 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3293 "0325 Reset HBA Data: x%x x%x\n",
2e0fef85 3294 phba->pport->port_state, psli->sli_flag);
dea3101e
JB
3295
3296 /* perform board reset */
3297 phba->fc_eventTag = 0;
4d9ab994 3298 phba->link_events = 0;
2e0fef85
JS
3299 phba->pport->fc_myDID = 0;
3300 phba->pport->fc_prevDID = 0;
dea3101e 3301
41415862
JW
3302 /* Turn off parity checking and serr during the physical reset */
3303 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3304 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3305 (cfg_value &
3306 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3307
3772a991
JS
3308 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3309
41415862
JW
3310 /* Now toggle INITFF bit in the Host Control Register */
3311 writel(HC_INITFF, phba->HCregaddr);
3312 mdelay(1);
3313 readl(phba->HCregaddr); /* flush */
3314 writel(0, phba->HCregaddr);
3315 readl(phba->HCregaddr); /* flush */
3316
3317 /* Restore PCI cmd register */
3318 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea3101e
JB
3319
3320 /* Initialize relevant SLI info */
41415862
JW
3321 for (i = 0; i < psli->num_rings; i++) {
3322 pring = &psli->ring[i];
dea3101e
JB
3323 pring->flag = 0;
3324 pring->rspidx = 0;
3325 pring->next_cmdidx = 0;
3326 pring->local_getidx = 0;
3327 pring->cmdidx = 0;
3328 pring->missbufcnt = 0;
3329 }
dea3101e 3330
2e0fef85 3331 phba->link_state = LPFC_WARM_START;
41415862
JW
3332 return 0;
3333}
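/*
 * Sketch of the parity/SERR masking idiom used during the physical reset
 * above (helper and callback names are assumptions): save PCI_COMMAND,
 * clear the error-reporting bits for the duration of the reset, then
 * restore the saved value so normal PCI error checking resumes.
 */
static void lpfc_example_pci_quiet_reset(struct pci_dev *pdev,
					 void (*do_reset)(void))
{
	uint16_t cfg_value;

	pci_read_config_word(pdev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(pdev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
	do_reset();		/* e.g. toggle HC_INITFF as done above */
	pci_write_config_word(pdev, PCI_COMMAND, cfg_value);
}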
3334
e59058c4 3335/**
da0436e9
JS
3336 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3337 * @phba: Pointer to HBA context object.
3338 *
3339 * This function resets a SLI4 HBA. It disables PCI layer parity
3340 * checking while it resets the device. The caller is not required to hold
3341 * any locks.
3342 *
3343 * This function returns 0 always.
3344 **/
3345int
3346lpfc_sli4_brdreset(struct lpfc_hba *phba)
3347{
3348 struct lpfc_sli *psli = &phba->sli;
3349 uint16_t cfg_value;
3350 uint8_t qindx;
3351
3352 /* Reset HBA */
3353 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3354 "0295 Reset HBA Data: x%x x%x\n",
3355 phba->pport->port_state, psli->sli_flag);
3356
3357 /* perform board reset */
3358 phba->fc_eventTag = 0;
4d9ab994 3359 phba->link_events = 0;
da0436e9
JS
3360 phba->pport->fc_myDID = 0;
3361 phba->pport->fc_prevDID = 0;
3362
3363 /* Turn off parity checking and serr during the physical reset */
3364 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3365 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3366 (cfg_value &
3367 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3368
3369 spin_lock_irq(&phba->hbalock);
3370 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3371 phba->fcf.fcf_flag = 0;
3372 /* Clean up the child queue list for the CQs */
3373 list_del_init(&phba->sli4_hba.mbx_wq->list);
3374 list_del_init(&phba->sli4_hba.els_wq->list);
3375 list_del_init(&phba->sli4_hba.hdr_rq->list);
3376 list_del_init(&phba->sli4_hba.dat_rq->list);
3377 list_del_init(&phba->sli4_hba.mbx_cq->list);
3378 list_del_init(&phba->sli4_hba.els_cq->list);
da0436e9
JS
3379 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3380 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3381 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3382 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3383 spin_unlock_irq(&phba->hbalock);
3384
3385 /* Now physically reset the device */
3386 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3387 "0389 Performing PCI function reset!\n");
3388 /* Perform FCoE PCI function reset */
3389 lpfc_pci_function_reset(phba);
3390
3391 return 0;
3392}
3393
3394/**
3395 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
e59058c4
JS
3396 * @phba: Pointer to HBA context object.
3397 *
3398 * This function is called in the SLI initialization code path to
3399 * restart the HBA. The caller is not required to hold any lock.
3400 * This function writes MBX_RESTART mailbox command to the SLIM and
3401 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
3402 * function to free any pending commands. The function enables
3403 * POST only during the first initialization. The function returns zero.
3404 * The function does not guarantee completion of the MBX_RESTART mailbox
3405 * command before it returns.
3406 **/
da0436e9
JS
3407static int
3408lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
41415862
JW
3409{
3410 MAILBOX_t *mb;
3411 struct lpfc_sli *psli;
41415862
JW
3412 volatile uint32_t word0;
3413 void __iomem *to_slim;
0d878419 3414 uint32_t hba_aer_enabled;
41415862 3415
2e0fef85 3416 spin_lock_irq(&phba->hbalock);
41415862 3417
0d878419
JS
3418 /* Take PCIe device Advanced Error Reporting (AER) state */
3419 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3420
41415862
JW
3421 psli = &phba->sli;
3422
3423 /* Restart HBA */
3424 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
e8b62011 3425 "0337 Restart HBA Data: x%x x%x\n",
2e0fef85 3426 phba->pport->port_state, psli->sli_flag);
41415862
JW
3427
3428 word0 = 0;
3429 mb = (MAILBOX_t *) &word0;
3430 mb->mbxCommand = MBX_RESTART;
3431 mb->mbxHc = 1;
3432
9290831f
JS
3433 lpfc_reset_barrier(phba);
3434
41415862
JW
3435 to_slim = phba->MBslimaddr;
3436 writel(*(uint32_t *) mb, to_slim);
3437 readl(to_slim); /* flush */
3438
3439 /* Only skip post after fc_ffinit is completed */
eaf15d5b 3440 if (phba->pport->port_state)
41415862 3441 word0 = 1; /* This is really setting up word1 */
eaf15d5b 3442 else
41415862 3443 word0 = 0; /* This is really setting up word1 */
65a29c16 3444 to_slim = phba->MBslimaddr + sizeof (uint32_t);
41415862
JW
3445 writel(*(uint32_t *) mb, to_slim);
3446 readl(to_slim); /* flush */
dea3101e 3447
41415862 3448 lpfc_sli_brdreset(phba);
2e0fef85
JS
3449 phba->pport->stopped = 0;
3450 phba->link_state = LPFC_INIT_START;
da0436e9 3451 phba->hba_flag = 0;
2e0fef85 3452 spin_unlock_irq(&phba->hbalock);
41415862 3453
64ba8818
JS
3454 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3455 psli->stats_start = get_seconds();
3456
eaf15d5b
JS
3457 /* Give the INITFF and Post time to settle. */
3458 mdelay(100);
41415862 3459
0d878419
JS
3460 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
3461 if (hba_aer_enabled)
3462 pci_disable_pcie_error_reporting(phba->pcidev);
3463
41415862 3464 lpfc_hba_down_post(phba);
dea3101e
JB
3465
3466 return 0;
3467}
3468
da0436e9
JS
3469/**
3470 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3471 * @phba: Pointer to HBA context object.
3472 *
3473 * This function is called in the SLI initialization code path to restart
3474 * a SLI4 HBA. The caller is not required to hold any lock.
3475 * At the end of the function, it calls lpfc_hba_down_post function to
3476 * free any pending commands.
3477 **/
3478static int
3479lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3480{
3481 struct lpfc_sli *psli = &phba->sli;
3482
3483
3484 /* Restart HBA */
3485 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3486 "0296 Restart HBA Data: x%x x%x\n",
3487 phba->pport->port_state, psli->sli_flag);
3488
3489 lpfc_sli4_brdreset(phba);
3490
3491 spin_lock_irq(&phba->hbalock);
3492 phba->pport->stopped = 0;
3493 phba->link_state = LPFC_INIT_START;
3494 phba->hba_flag = 0;
3495 spin_unlock_irq(&phba->hbalock);
3496
3497 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3498 psli->stats_start = get_seconds();
3499
3500 lpfc_hba_down_post(phba);
3501
3502 return 0;
3503}
3504
3505/**
3506 * lpfc_sli_brdrestart - Wrapper func for restarting hba
3507 * @phba: Pointer to HBA context object.
3508 *
3509 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
3510 * API jump table function pointer from the lpfc_hba struct.
3511**/
3512int
3513lpfc_sli_brdrestart(struct lpfc_hba *phba)
3514{
3515 return phba->lpfc_sli_brdrestart(phba);
3516}
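/*
 * Sketch of the API jump-table binding behind the wrappers above
 * (a simplified assumption; the real setup lives in the device init
 * path): the detected SLI revision selects which _s3/_s4 routine each
 * wrapper dispatches to through the lpfc_hba function pointers.
 */
static void lpfc_example_bind_brd_ops(struct lpfc_hba *phba, int sli_rev)
{
	if (sli_rev == LPFC_SLI_REV4) {
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
	} else {
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
	}
}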
3517
e59058c4 3518/**
3621a710 3519 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
e59058c4
JS
3520 * @phba: Pointer to HBA context object.
3521 *
3522 * This function is called after an HBA restart to wait for the
3523 * restart to complete. A successful restart is indicated by the
3524 * HS_FFRDY and HS_MBRDY bits. If the HBA has not come ready after 15
3525 * iterations, the function will restart the HBA again. The function returns
3526 * zero if the HBA successfully restarted, else it returns a negative error code.
3527 **/
dea3101e
JB
3528static int
3529lpfc_sli_chipset_init(struct lpfc_hba *phba)
3530{
3531 uint32_t status, i = 0;
3532
3533 /* Read the HBA Host Status Register */
3534 status = readl(phba->HSregaddr);
3535
3536 /* Check status register to see what current state is */
3537 i = 0;
3538 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
3539
3540 /* Check every 10ms for 5 retries, then every 500ms for 5, then
3541 * every 2.5 sec for 5; then reset the board and check every
3542 * 2.5 sec for 5 more.
3543 */
3544 if (i++ >= 20) {
3545 /* Adapter failed to init, timeout, status reg
3546 <status> */
ed957684 3547 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 3548 "0436 Adapter failed to init, "
09372820
JS
3549 "timeout, status reg x%x, "
3550 "FW Data: A8 x%x AC x%x\n", status,
3551 readl(phba->MBslimaddr + 0xa8),
3552 readl(phba->MBslimaddr + 0xac));
2e0fef85 3553 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
3554 return -ETIMEDOUT;
3555 }
3556
3557 /* Check to see if any errors occurred during init */
3558 if (status & HS_FFERM) {
3559 /* ERROR: During chipset initialization */
3560 /* Adapter failed to init, chipset, status reg
3561 <status> */
ed957684 3562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 3563 "0437 Adapter failed to init, "
09372820
JS
3564 "chipset, status reg x%x, "
3565 "FW Data: A8 x%x AC x%x\n", status,
3566 readl(phba->MBslimaddr + 0xa8),
3567 readl(phba->MBslimaddr + 0xac));
2e0fef85 3568 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
3569 return -EIO;
3570 }
3571
3572 if (i <= 5) {
3573 msleep(10);
3574 } else if (i <= 10) {
3575 msleep(500);
3576 } else {
3577 msleep(2500);
3578 }
3579
3580 if (i == 15) {
2e0fef85 3581 /* Do post */
92d7f7b0 3582 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 3583 lpfc_sli_brdrestart(phba);
dea3101e
JB
3584 }
3585 /* Read the HBA Host Status Register */
3586 status = readl(phba->HSregaddr);
3587 }
3588
3589 /* Check to see if any errors occurred during init */
3590 if (status & HS_FFERM) {
3591 /* ERROR: During chipset initialization */
3592 /* Adapter failed to init, chipset, status reg <status> */
ed957684 3593 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 3594 "0438 Adapter failed to init, chipset, "
09372820
JS
3595 "status reg x%x, "
3596 "FW Data: A8 x%x AC x%x\n", status,
3597 readl(phba->MBslimaddr + 0xa8),
3598 readl(phba->MBslimaddr + 0xac));
2e0fef85 3599 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
3600 return -EIO;
3601 }
3602
3603 /* Clear all interrupt enable conditions */
3604 writel(0, phba->HCregaddr);
3605 readl(phba->HCregaddr); /* flush */
3606
3607 /* setup host attn register */
3608 writel(0xffffffff, phba->HAregaddr);
3609 readl(phba->HAregaddr); /* flush */
3610 return 0;
3611}
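/*
 * Worked timeout budget for the polling loop above, derived from the
 * sleep values in the code: 5 * 10ms + 5 * 500ms + 10 * 2500ms =
 * 27550ms, i.e. roughly 27.5 seconds of polling, with one board
 * restart at iteration 15, before -ETIMEDOUT is returned.
 */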
3612
e59058c4 3613/**
3621a710 3614 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
e59058c4
JS
3615 *
3616 * This function calculates and returns the number of HBQs required to be
3617 * configured.
3618 **/
78b2d852 3619int
ed957684
JS
3620lpfc_sli_hbq_count(void)
3621{
92d7f7b0 3622 return ARRAY_SIZE(lpfc_hbq_defs);
ed957684
JS
3623}
3624
e59058c4 3625/**
3621a710 3626 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
e59058c4
JS
3627 *
3628 * This function adds the number of hbq entries in every HBQ to get
3629 * the total number of hbq entries required for the HBA and returns
3630 * the total count.
3631 **/
ed957684
JS
3632static int
3633lpfc_sli_hbq_entry_count(void)
3634{
3635 int hbq_count = lpfc_sli_hbq_count();
3636 int count = 0;
3637 int i;
3638
3639 for (i = 0; i < hbq_count; ++i)
92d7f7b0 3640 count += lpfc_hbq_defs[i]->entry_count;
ed957684
JS
3641 return count;
3642}
3643
e59058c4 3644/**
3621a710 3645 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
e59058c4
JS
3646 *
3647 * This function calculates the amount of memory required for all hbq entries
3648 * to be configured and returns the total memory required.
3649 **/
dea3101e 3650int
ed957684
JS
3651lpfc_sli_hbq_size(void)
3652{
3653 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
3654}
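/*
 * Illustrative caller sketch (an assumption, not this file's code): an
 * init path could size one coherent DMA region for every HBQ entry up
 * front using the helper above.
 */
static void *lpfc_example_alloc_hbq_mem(struct lpfc_hba *phba,
					dma_addr_t *phys)
{
	return dma_alloc_coherent(&phba->pcidev->dev, lpfc_sli_hbq_size(),
				  phys, GFP_KERNEL);
}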
3655
e59058c4 3656/**
3621a710 3657 * lpfc_sli_hbq_setup - configure and initialize HBQs
e59058c4
JS
3658 * @phba: Pointer to HBA context object.
3659 *
3660 * This function is called during the SLI initialization to configure
3661 * all the HBQs and post buffers to the HBQ. The caller is not
3662 * required to hold any locks. This function will return zero if successful
3663 * else it will return negative error code.
3664 **/
ed957684
JS
3665static int
3666lpfc_sli_hbq_setup(struct lpfc_hba *phba)
3667{
3668 int hbq_count = lpfc_sli_hbq_count();
3669 LPFC_MBOXQ_t *pmb;
3670 MAILBOX_t *pmbox;
3671 uint32_t hbqno;
3672 uint32_t hbq_entry_index;
ed957684 3673
92d7f7b0
JS
3674 /* Get a Mailbox buffer to setup mailbox
3675 * commands for HBA initialization
3676 */
ed957684
JS
3677 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3678
3679 if (!pmb)
3680 return -ENOMEM;
3681
04c68496 3682 pmbox = &pmb->u.mb;
ed957684
JS
3683
3684 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
3685 phba->link_state = LPFC_INIT_MBX_CMDS;
3163f725 3686 phba->hbq_in_use = 1;
ed957684
JS
3687
3688 hbq_entry_index = 0;
3689 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
3690 phba->hbqs[hbqno].next_hbqPutIdx = 0;
3691 phba->hbqs[hbqno].hbqPutIdx = 0;
3692 phba->hbqs[hbqno].local_hbqGetIdx = 0;
3693 phba->hbqs[hbqno].entry_count =
92d7f7b0 3694 lpfc_hbq_defs[hbqno]->entry_count;
51ef4c26
JS
3695 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
3696 hbq_entry_index, pmb);
ed957684
JS
3697 hbq_entry_index += phba->hbqs[hbqno].entry_count;
3698
3699 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
3700 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
3701 mbxStatus <status>, ring <num> */
3702
3703 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 3704 LOG_SLI | LOG_VPORT,
e8b62011 3705 "1805 Adapter failed to init. "
ed957684 3706 "Data: x%x x%x x%x\n",
e8b62011 3707 pmbox->mbxCommand,
ed957684
JS
3708 pmbox->mbxStatus, hbqno);
3709
3710 phba->link_state = LPFC_HBA_ERROR;
3711 mempool_free(pmb, phba->mbox_mem_pool);
ed957684
JS
3712 return -ENXIO;
3713 }
3714 }
3715 phba->hbq_count = hbq_count;
3716
ed957684
JS
3717 mempool_free(pmb, phba->mbox_mem_pool);
3718
92d7f7b0 3719 /* Initially populate or replenish the HBQs */
d7c255b2
JS
3720 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
3721 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
ed957684
JS
3722 return 0;
3723}
3724
4f774513
JS
3725/**
3726 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
3727 * @phba: Pointer to HBA context object.
3728 *
3729 * This function is called during the SLI initialization to configure
3730 * the receive buffer HBQ and post buffers to it. The caller is not
3731 * required to hold any locks. This function will return zero if successful
3732 * else it will return negative error code.
3733 **/
3734static int
3735lpfc_sli4_rb_setup(struct lpfc_hba *phba)
3736{
3737 phba->hbq_in_use = 1;
3738 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
3739 phba->hbq_count = 1;
3740 /* Initially populate or replenish the HBQs */
3741 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
3742 return 0;
3743}
3744
e59058c4 3745/**
3621a710 3746 * lpfc_sli_config_port - Issue config port mailbox command
e59058c4
JS
3747 * @phba: Pointer to HBA context object.
3748 * @sli_mode: sli mode - 2/3
3749 *
3750 * This function is called by the sli initialization code path
3751 * to issue the config_port mailbox command. This function restarts the
3752 * HBA firmware and issues a config_port mailbox command to configure
3753 * the SLI interface in the sli mode specified by sli_mode
3754 * variable. The caller is not required to hold any locks.
3755 * The function returns 0 if successful, else returns negative error
3756 * code.
3757 **/
9399627f
JS
3758int
3759lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
dea3101e
JB
3760{
3761 LPFC_MBOXQ_t *pmb;
3762 uint32_t resetcount = 0, rc = 0, done = 0;
3763
3764 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3765 if (!pmb) {
2e0fef85 3766 phba->link_state = LPFC_HBA_ERROR;
dea3101e
JB
3767 return -ENOMEM;
3768 }
3769
ed957684 3770 phba->sli_rev = sli_mode;
dea3101e 3771 while (resetcount < 2 && !done) {
2e0fef85 3772 spin_lock_irq(&phba->hbalock);
1c067a42 3773 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2e0fef85 3774 spin_unlock_irq(&phba->hbalock);
92d7f7b0 3775 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
41415862 3776 lpfc_sli_brdrestart(phba);
dea3101e
JB
3777 rc = lpfc_sli_chipset_init(phba);
3778 if (rc)
3779 break;
3780
2e0fef85 3781 spin_lock_irq(&phba->hbalock);
1c067a42 3782 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 3783 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
3784 resetcount++;
3785
ed957684
JS
3786 /* Call pre CONFIG_PORT mailbox command initialization. A
3787 * value of 0 means the call was successful. Any other
3788 * nonzero value is a failure, but if ERESTART is returned,
3789 * the driver may reset the HBA and try again.
3790 */
dea3101e
JB
3791 rc = lpfc_config_port_prep(phba);
3792 if (rc == -ERESTART) {
ed957684 3793 phba->link_state = LPFC_LINK_UNKNOWN;
dea3101e 3794 continue;
34b02dcd 3795 } else if (rc)
dea3101e 3796 break;
2e0fef85 3797 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e
JB
3798 lpfc_config_port(phba, pmb);
3799 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
34b02dcd
JS
3800 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
3801 LPFC_SLI3_HBQ_ENABLED |
3802 LPFC_SLI3_CRP_ENABLED |
e2a0a9d6
JS
3803 LPFC_SLI3_INB_ENABLED |
3804 LPFC_SLI3_BG_ENABLED);
ed957684 3805 if (rc != MBX_SUCCESS) {
dea3101e 3806 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 3807 "0442 Adapter failed to init, mbxCmd x%x "
92d7f7b0 3808 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
04c68496 3809 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
2e0fef85 3810 spin_lock_irq(&phba->hbalock);
04c68496 3811 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
2e0fef85
JS
3812 spin_unlock_irq(&phba->hbalock);
3813 rc = -ENXIO;
04c68496
JS
3814 } else {
3815 /* Allow asynchronous mailbox command to go through */
3816 spin_lock_irq(&phba->hbalock);
3817 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
3818 spin_unlock_irq(&phba->hbalock);
ed957684 3819 done = 1;
04c68496 3820 }
dea3101e 3821 }
ed957684
JS
3822 if (!done) {
3823 rc = -EINVAL;
3824 goto do_prep_failed;
3825 }
04c68496
JS
3826 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
3827 if (!pmb->u.mb.un.varCfgPort.cMA) {
34b02dcd
JS
3828 rc = -ENXIO;
3829 goto do_prep_failed;
3830 }
04c68496 3831 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
34b02dcd 3832 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
04c68496
JS
3833 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
3834 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
3835 phba->max_vpi : phba->max_vports;
3836
34b02dcd
JS
3837 } else
3838 phba->max_vpi = 0;
04c68496
JS
3839 if (pmb->u.mb.un.varCfgPort.gdss)
3840 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
3841 if (pmb->u.mb.un.varCfgPort.gerbm)
34b02dcd 3842 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
04c68496 3843 if (pmb->u.mb.un.varCfgPort.gcrp)
34b02dcd 3844 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
04c68496 3845 if (pmb->u.mb.un.varCfgPort.ginb) {
34b02dcd 3846 phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
8f34f4ce 3847 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
34b02dcd
JS
3848 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
3849 phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
3850 phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
3851 phba->inb_last_counter =
3852 phba->mbox->us.s3_inb_pgp.counter;
3853 } else {
8f34f4ce 3854 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
34b02dcd
JS
3855 phba->port_gp = phba->mbox->us.s3_pgp.port;
3856 phba->inb_ha_copy = NULL;
3857 phba->inb_counter = NULL;
3858 }
e2a0a9d6
JS
3859
3860 if (phba->cfg_enable_bg) {
04c68496 3861 if (pmb->u.mb.un.varCfgPort.gbg)
e2a0a9d6
JS
3862 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3863 else
3864 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3865 "0443 Adapter did not grant "
3866 "BlockGuard\n");
3867 }
34b02dcd 3868 } else {
8f34f4ce 3869 phba->hbq_get = NULL;
34b02dcd
JS
3870 phba->port_gp = phba->mbox->us.s2.port;
3871 phba->inb_ha_copy = NULL;
3872 phba->inb_counter = NULL;
d7c255b2 3873 phba->max_vpi = 0;
ed957684 3874 }
92d7f7b0 3875do_prep_failed:
ed957684
JS
3876 mempool_free(pmb, phba->mbox_mem_pool);
3877 return rc;
3878}
3879
e59058c4
JS
3880
3881/**
3621a710 3882 * lpfc_sli_hba_setup - SLI initialization function
e59058c4
JS
3883 * @phba: Pointer to HBA context object.
3884 *
3885 * This function is the main SLI initialization function. This function
3886 * is called by the HBA initialization code, HBA reset code and HBA
3887 * error attention handler code. Caller is not required to hold any
3888 * locks. This function issues config_port mailbox command to configure
3889 * the SLI, setup iocb rings and HBQ rings. In the end the function
3890 * calls the config_port_post function to issue init_link mailbox
3891 * command and to start the discovery. The function will return zero
3892 * if successful, else it will return negative error code.
3893 **/
ed957684
JS
3894int
3895lpfc_sli_hba_setup(struct lpfc_hba *phba)
3896{
3897 uint32_t rc;
92d7f7b0 3898 int mode = 3;
ed957684
JS
3899
3900 switch (lpfc_sli_mode) {
3901 case 2:
78b2d852 3902 if (phba->cfg_enable_npiv) {
92d7f7b0 3903 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011 3904 "1824 NPIV enabled: Override lpfc_sli_mode "
92d7f7b0 3905 "parameter (%d) to auto (0).\n",
e8b62011 3906 lpfc_sli_mode);
92d7f7b0
JS
3907 break;
3908 }
ed957684
JS
3909 mode = 2;
3910 break;
3911 case 0:
3912 case 3:
3913 break;
3914 default:
92d7f7b0 3915 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
3916 "1819 Unrecognized lpfc_sli_mode "
3917 "parameter: %d.\n", lpfc_sli_mode);
ed957684
JS
3918
3919 break;
3920 }
3921
9399627f
JS
3922 rc = lpfc_sli_config_port(phba, mode);
3923
ed957684 3924 if (rc && lpfc_sli_mode == 3)
92d7f7b0 3925 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
e8b62011
JS
3926 "1820 Unable to select SLI-3. "
3927 "Not supported by adapter.\n");
ed957684 3928 if (rc && mode != 2)
9399627f 3929 rc = lpfc_sli_config_port(phba, 2);
ed957684 3930 if (rc)
dea3101e
JB
3931 goto lpfc_sli_hba_setup_error;
3932
0d878419
JS
3933 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
3934 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
3935 rc = pci_enable_pcie_error_reporting(phba->pcidev);
3936 if (!rc) {
3937 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3938 "2709 This device supports "
3939 "Advanced Error Reporting (AER)\n");
3940 spin_lock_irq(&phba->hbalock);
3941 phba->hba_flag |= HBA_AER_ENABLED;
3942 spin_unlock_irq(&phba->hbalock);
3943 } else {
3944 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3945 "2708 This device does not support "
3946 "Advanced Error Reporting (AER)\n");
3947 phba->cfg_aer_support = 0;
3948 }
3949 }
3950
ed957684
JS
3951 if (phba->sli_rev == 3) {
3952 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
3953 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
ed957684
JS
3954 } else {
3955 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
3956 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
92d7f7b0 3957 phba->sli3_options = 0;
ed957684
JS
3958 }
3959
3960 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
3961 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
3962 phba->sli_rev, phba->max_vpi);
ed957684 3963 rc = lpfc_sli_ring_map(phba);
dea3101e
JB
3964
3965 if (rc)
3966 goto lpfc_sli_hba_setup_error;
3967
9399627f 3968 /* Init HBQs */
ed957684
JS
3969 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3970 rc = lpfc_sli_hbq_setup(phba);
3971 if (rc)
3972 goto lpfc_sli_hba_setup_error;
3973 }
04c68496 3974 spin_lock_irq(&phba->hbalock);
dea3101e 3975 phba->sli.sli_flag |= LPFC_PROCESS_LA;
04c68496 3976 spin_unlock_irq(&phba->hbalock);
dea3101e
JB
3977
3978 rc = lpfc_config_port_post(phba);
3979 if (rc)
3980 goto lpfc_sli_hba_setup_error;
3981
ed957684
JS
3982 return rc;
3983
92d7f7b0 3984lpfc_sli_hba_setup_error:
2e0fef85 3985 phba->link_state = LPFC_HBA_ERROR;
ed957684 3986 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011 3987 "0445 Firmware initialization failed\n");
dea3101e
JB
3988 return rc;
3989}
3990
e59058c4 3991/**
da0436e9
JS
3992 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
3993 * @phba: Pointer to HBA context object.
3994 * @mboxq: mailbox pointer.
3995 * This function issues a dump mailbox command to read config region
3996 * 23, parses the records in the region, and populates the driver
3997 * data structures.
e59058c4 3998 **/
da0436e9
JS
3999static int
4000lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4001 LPFC_MBOXQ_t *mboxq)
dea3101e 4002{
da0436e9
JS
4003 struct lpfc_dmabuf *mp;
4004 struct lpfc_mqe *mqe;
4005 uint32_t data_length;
4006 int rc;
dea3101e 4007
da0436e9
JS
4008 /* Program the default value of vlan_id and fc_map */
4009 phba->valid_vlan = 0;
4010 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4011 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4012 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
2e0fef85 4013
da0436e9
JS
4014 mqe = &mboxq->u.mqe;
4015 if (lpfc_dump_fcoe_param(phba, mboxq))
4016 return -ENOMEM;
4017
4018 mp = (struct lpfc_dmabuf *) mboxq->context1;
4019 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4020
4021 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4022 "(%d):2571 Mailbox cmd x%x Status x%x "
4023 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4024 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4025 "CQ: x%x x%x x%x x%x\n",
4026 mboxq->vport ? mboxq->vport->vpi : 0,
4027 bf_get(lpfc_mqe_command, mqe),
4028 bf_get(lpfc_mqe_status, mqe),
4029 mqe->un.mb_words[0], mqe->un.mb_words[1],
4030 mqe->un.mb_words[2], mqe->un.mb_words[3],
4031 mqe->un.mb_words[4], mqe->un.mb_words[5],
4032 mqe->un.mb_words[6], mqe->un.mb_words[7],
4033 mqe->un.mb_words[8], mqe->un.mb_words[9],
4034 mqe->un.mb_words[10], mqe->un.mb_words[11],
4035 mqe->un.mb_words[12], mqe->un.mb_words[13],
4036 mqe->un.mb_words[14], mqe->un.mb_words[15],
4037 mqe->un.mb_words[16], mqe->un.mb_words[50],
4038 mboxq->mcqe.word0,
4039 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4040 mboxq->mcqe.trailer);
4041
4042 if (rc) {
4043 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4044 kfree(mp);
4045 return -EIO;
4046 }
4047 data_length = mqe->un.mb_words[5];
a0c87cbd 4048 if (data_length > DMP_RGN23_SIZE) {
d11e31dd
JS
4049 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4050 kfree(mp);
da0436e9 4051 return -EIO;
d11e31dd 4052 }
dea3101e 4053
da0436e9
JS
4054 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4055 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4056 kfree(mp);
4057 return 0;
4058}
e59058c4
JS
4059
4060/**
da0436e9
JS
4061 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4062 * @phba: pointer to lpfc hba data structure.
4063 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4064 * @vpd: pointer to the memory to hold resulting port vpd data.
4065 * @vpd_size: On input, the number of bytes allocated to @vpd.
4066 * On output, the number of data bytes in @vpd.
e59058c4 4067 *
da0436e9
JS
4068 * This routine executes a READ_REV SLI4 mailbox command. In
4069 * addition, this routine gets the port vpd data.
4070 *
4071 * Return codes
af901ca1 4072 * 0 - successful
da0436e9 4073 * ENOMEM - could not allocate memory.
e59058c4 4074 **/
da0436e9
JS
4075static int
4076lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4077 uint8_t *vpd, uint32_t *vpd_size)
dea3101e 4078{
da0436e9
JS
4079 int rc = 0;
4080 uint32_t dma_size;
4081 struct lpfc_dmabuf *dmabuf;
4082 struct lpfc_mqe *mqe;
dea3101e 4083
da0436e9
JS
4084 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4085 if (!dmabuf)
4086 return -ENOMEM;
4087
4088 /*
4089 * Get a DMA buffer for the vpd data resulting from the READ_REV
4090 * mailbox command.
a257bf90 4091 */
da0436e9
JS
4092 dma_size = *vpd_size;
4093 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4094 dma_size,
4095 &dmabuf->phys,
4096 GFP_KERNEL);
4097 if (!dmabuf->virt) {
4098 kfree(dmabuf);
4099 return -ENOMEM;
a257bf90 4100 }
da0436e9 4101 memset(dmabuf->virt, 0, dma_size);
a257bf90 4102
da0436e9
JS
4103 /*
4104 * The SLI4 implementation of READ_REV conflicts at word1,
4105 * bits 31:16 and SLI4 adds vpd functionality not present
4106 * in SLI3. This code corrects the conflicts.
1dcb58e5 4107 */
da0436e9
JS
4108 lpfc_read_rev(phba, mboxq);
4109 mqe = &mboxq->u.mqe;
4110 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4111 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4112 mqe->un.read_rev.word1 &= 0x0000FFFF;
4113 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4114 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4115
4116 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4117 if (rc) {
4118 dma_free_coherent(&phba->pcidev->dev, dma_size,
4119 dmabuf->virt, dmabuf->phys);
def9c7a9 4120 kfree(dmabuf);
da0436e9
JS
4121 return -EIO;
4122 }
1dcb58e5 4123
da0436e9
JS
4124 /*
4125 * The available vpd length cannot be bigger than the
4126 * DMA buffer passed to the port. Catch the less than
4127 * case and update the caller's size.
4128 */
4129 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4130 *vpd_size = mqe->un.read_rev.avail_vpd_len;
3772a991 4131
da0436e9
JS
4132 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
4133 dma_free_coherent(&phba->pcidev->dev, dma_size,
4134 dmabuf->virt, dmabuf->phys);
4135 kfree(dmabuf);
4136 return 0;
dea3101e
JB
4137}
4138
e59058c4 4139/**
da0436e9
JS
4140 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4141 * @phba: pointer to lpfc hba data structure.
e59058c4 4142 *
da0436e9
JS
4143 * This routine is called to explicitly arm the SLI4 device's completion and
4144 * event queues
4145 **/
4146static void
4147lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4148{
4149 uint8_t fcp_eqidx;
4150
4151 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4152 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
da0436e9
JS
4153 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4154 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4155 LPFC_QUEUE_REARM);
4156 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4157 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4158 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4159 LPFC_QUEUE_REARM);
4160}
4161
4162/**
4163 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
4164 * @phba: Pointer to HBA context object.
4165 *
4166 * This function is the main SLI4 device initialization PCI function. This
4167 * function is called by the HBA initialization code, HBA reset code and
4168 * HBA error attention handler code. Caller is not required to hold any
4169 * locks.
4170 **/
4171int
4172lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4173{
4174 int rc;
4175 LPFC_MBOXQ_t *mboxq;
4176 struct lpfc_mqe *mqe;
4177 uint8_t *vpd;
4178 uint32_t vpd_size;
4179 uint32_t ftr_rsp = 0;
4180 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
4181 struct lpfc_vport *vport = phba->pport;
4182 struct lpfc_dmabuf *mp;
4183
4184 /* Perform a PCI function reset to start from clean */
4185 rc = lpfc_pci_function_reset(phba);
4186 if (unlikely(rc))
4187 return -ENODEV;
4188
4189 /* Check the HBA Host Status Register for readiness */
4190 rc = lpfc_sli4_post_status_check(phba);
4191 if (unlikely(rc))
4192 return -ENODEV;
4193 else {
4194 spin_lock_irq(&phba->hbalock);
4195 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
4196 spin_unlock_irq(&phba->hbalock);
4197 }
4198
4199 /*
4200 * Allocate a single mailbox container for initializing the
4201 * port.
4202 */
4203 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4204 if (!mboxq)
4205 return -ENOMEM;
4206
4207 /*
4208 * Continue initialization with default values even if driver failed
4209 * to read FCoE param config regions
4210 */
4211 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4212 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
e4e74273 4213 "2570 Failed to read FCoE parameters\n");
da0436e9
JS
4214
4215 /* Issue READ_REV to collect vpd and FW information. */
4216 vpd_size = PAGE_SIZE;
4217 vpd = kzalloc(vpd_size, GFP_KERNEL);
4218 if (!vpd) {
4219 rc = -ENOMEM;
4220 goto out_free_mbox;
4221 }
4222
4223 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4224 if (unlikely(rc))
4225 goto out_free_vpd;
4226
4227 mqe = &mboxq->u.mqe;
f1126688
JS
4228 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4229 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4230 phba->hba_flag |= HBA_FCOE_SUPPORT;
45ed1190
JS
4231
4232 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
4233 LPFC_DCBX_CEE_MODE)
4234 phba->hba_flag |= HBA_FIP_SUPPORT;
4235 else
4236 phba->hba_flag &= ~HBA_FIP_SUPPORT;
4237
f1126688
JS
4238 if (phba->sli_rev != LPFC_SLI_REV4 ||
4239 !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
da0436e9
JS
4240 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4241 "0376 READ_REV Error. SLI Level %d "
4242 "FCoE enabled %d\n",
f1126688 4243 phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
da0436e9
JS
4244 rc = -EIO;
4245 goto out_free_vpd;
4246 }
da0436e9
JS
4247 /*
4248 * Evaluate the read rev and vpd data. Populate the driver
4249 * state with the results. If this routine fails, the failure
4250 * is not fatal as the driver will use generic values.
4251 */
4252 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
4253 if (unlikely(!rc)) {
4254 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4255 "0377 Error %d parsing vpd. "
4256 "Using defaults.\n", rc);
4257 rc = 0;
4258 }
4259
f1126688
JS
4260 /* Save information as VPD data */
4261 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
4262 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
4263 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
4264 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
4265 &mqe->un.read_rev);
4266 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
4267 &mqe->un.read_rev);
4268 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
4269 &mqe->un.read_rev);
4270 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
4271 &mqe->un.read_rev);
4272 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
4273 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
4274 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
4275 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
4276 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
4277 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
4278 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4279 "(%d):0380 READ_REV Status x%x "
4280 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
4281 mboxq->vport ? mboxq->vport->vpi : 0,
4282 bf_get(lpfc_mqe_status, mqe),
4283 phba->vpd.rev.opFwName,
4284 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
4285 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
da0436e9
JS
4286
4287 /*
4288 * Discover the port's supported feature set and match it against the
4289 * hosts requests.
4290 */
4291 lpfc_request_features(phba, mboxq);
4292 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4293 if (unlikely(rc)) {
4294 rc = -EIO;
4295 goto out_free_vpd;
4296 }
4297
4298 /*
4299 * The port must support FCP initiator mode as this is the
4300 * only mode running in the host.
4301 */
4302 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
4303 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4304 "0378 No support for fcpi mode.\n");
4305 ftr_rsp++;
4306 }
4307
4308 /*
4309 * If the port cannot support the host's requested features
4310 * then turn off the global config parameters to disable the
4311 * feature in the driver. This is not a fatal error.
4312 */
4313 if ((phba->cfg_enable_bg) &&
4314 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4315 ftr_rsp++;
4316
4317 if (phba->max_vpi && phba->cfg_enable_npiv &&
4318 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4319 ftr_rsp++;
4320
4321 if (ftr_rsp) {
4322 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4323 "0379 Feature Mismatch Data: x%08x %08x "
4324 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
4325 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
4326 phba->cfg_enable_npiv, phba->max_vpi);
4327 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4328 phba->cfg_enable_bg = 0;
4329 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4330 phba->cfg_enable_npiv = 0;
4331 }
4332
4333 /* These SLI3 features are assumed in SLI4 */
4334 spin_lock_irq(&phba->hbalock);
4335 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4336 spin_unlock_irq(&phba->hbalock);
4337
4338 /* Read the port's service parameters. */
4339 lpfc_read_sparam(phba, mboxq, vport->vpi);
4340 mboxq->vport = vport;
4341 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4342 mp = (struct lpfc_dmabuf *) mboxq->context1;
4343 if (rc == MBX_SUCCESS) {
4344 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
4345 rc = 0;
4346 }
4347
4348 /*
4349 * This memory was allocated by the lpfc_read_sparam routine. Release
4350 * it to the mbuf pool.
4351 */
4352 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4353 kfree(mp);
4354 mboxq->context1 = NULL;
4355 if (unlikely(rc)) {
4356 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4357 "0382 READ_SPARAM command failed "
4358 "status %d, mbxStatus x%x\n",
4359 rc, bf_get(lpfc_mqe_status, mqe));
4360 phba->link_state = LPFC_HBA_ERROR;
4361 rc = -EIO;
4362 goto out_free_vpd;
4363 }
4364
4365 if (phba->cfg_soft_wwnn)
4366 u64_to_wwn(phba->cfg_soft_wwnn,
4367 vport->fc_sparam.nodeName.u.wwn);
4368 if (phba->cfg_soft_wwpn)
4369 u64_to_wwn(phba->cfg_soft_wwpn,
4370 vport->fc_sparam.portName.u.wwn);
4371 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4372 sizeof(struct lpfc_name));
4373 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4374 sizeof(struct lpfc_name));
4375
4376 /* Update the fc_host data structures with new wwn. */
4377 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4378 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4379
4380 /* Register SGL pool to the device using non-embedded mailbox command */
4381 rc = lpfc_sli4_post_sgl_list(phba);
4382 if (unlikely(rc)) {
4383 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6a9c52cf
JS
4384 "0582 Error %d during sgl post operation\n",
4385 rc);
da0436e9
JS
4386 rc = -ENODEV;
4387 goto out_free_vpd;
4388 }
4389
4390 /* Register SCSI SGL pool to the device */
4391 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4392 if (unlikely(rc)) {
4393 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6a9c52cf
JS
4394 "0383 Error %d during scsi sgl post "
4395 "operation\n", rc);
da0436e9
JS
4396 /* Some Scsi buffers were moved to the abort scsi list */
4397 /* A pci function reset will repost them */
4398 rc = -ENODEV;
4399 goto out_free_vpd;
4400 }
4401
4402 /* Post the rpi header region to the device. */
4403 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
4404 if (unlikely(rc)) {
4405 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4406 "0393 Error %d during rpi post operation\n",
4407 rc);
4408 rc = -ENODEV;
4409 goto out_free_vpd;
4410 }
da0436e9
JS
4411
4412 /* Set up all the queues to the device */
4413 rc = lpfc_sli4_queue_setup(phba);
4414 if (unlikely(rc)) {
4415 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4416 "0381 Error %d during queue setup.\n ", rc);
4417 goto out_stop_timers;
4418 }
4419
4420 /* Arm the CQs and then EQs on device */
4421 lpfc_sli4_arm_cqeq_intr(phba);
4422
4423 /* Indicate device interrupt mode */
4424 phba->sli4_hba.intr_enable = 1;
4425
4426 /* Allow asynchronous mailbox command to go through */
4427 spin_lock_irq(&phba->hbalock);
4428 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4429 spin_unlock_irq(&phba->hbalock);
4430
4431 /* Post receive buffers to the device */
4432 lpfc_sli4_rb_setup(phba);
4433
4434 /* Start the ELS watchdog timer */
8fa38513
JS
4435 mod_timer(&vport->els_tmofunc,
4436 jiffies + HZ * (phba->fc_ratov * 2));
da0436e9
JS
4437
4438 /* Start heart beat timer */
4439 mod_timer(&phba->hb_tmofunc,
4440 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4441 phba->hb_outstanding = 0;
4442 phba->last_completion_time = jiffies;
4443
4444 /* Start error attention (ERATT) polling timer */
4445 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4446
4447 /*
4448 * The port is ready, set the host's link state to LINK_DOWN
4449 * in preparation for link interrupts.
4450 */
4451 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4452 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4453 lpfc_set_loopback_flag(phba);
4454 /* Change driver state to LPFC_LINK_DOWN right before init link */
4455 spin_lock_irq(&phba->hbalock);
4456 phba->link_state = LPFC_LINK_DOWN;
4457 spin_unlock_irq(&phba->hbalock);
4458 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4459 if (unlikely(rc != MBX_NOT_FINISHED)) {
4460 kfree(vpd);
4461 return 0;
4462 } else
4463 rc = -EIO;
4464
4465 /* Unset all the queues set up in this routine when error out */
4466 if (rc)
4467 lpfc_sli4_queue_unset(phba);
4468
4469out_stop_timers:
4470 if (rc)
4471 lpfc_stop_hba_timers(phba);
4472out_free_vpd:
4473 kfree(vpd);
4474out_free_mbox:
4475 mempool_free(mboxq, phba->mbox_mem_pool);
4476 return rc;
4477}
4478
4479/**
4480 * lpfc_mbox_timeout - Timeout call back function for mbox timer
4481 * @ptr: context object - pointer to hba structure.
4482 *
4483 * This is the callback function for the mailbox timer. The mailbox
4484 * timer is armed when a new mailbox command is issued and the timer
4485 * is deleted when the mailbox completes. The function is called by
4486 * the kernel timer code when a mailbox does not complete within the
4487 * expected time. This function wakes up the worker thread to
4488 * process the mailbox timeout and returns. All the processing is
4489 * done by the worker thread function lpfc_mbox_timeout_handler.
4490 **/
4491void
4492lpfc_mbox_timeout(unsigned long ptr)
4493{
4494 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4495 unsigned long iflag;
4496 uint32_t tmo_posted;
4497
4498 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
4499 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
4500 if (!tmo_posted)
4501 phba->pport->work_port_events |= WORKER_MBOX_TMO;
4502 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
4503
4504 if (!tmo_posted)
4505 lpfc_worker_wake_up(phba);
4506 return;
4507}
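/*
 * Sketch of how the timer pairs with the callback above (hypothetical
 * helper; the driver arms the timer inline when it issues a mailbox
 * command): each issue re-arms mbox_tmo for the command-specific
 * timeout returned by lpfc_mbox_tmo_val().
 */
static void lpfc_example_arm_mbox_tmo(struct lpfc_hba *phba, uint8_t cmd)
{
	mod_timer(&phba->sli.mbox_tmo,
		  jiffies + HZ * lpfc_mbox_tmo_val(phba, cmd));
}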
4508
4509
4510/**
4511 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
4512 * @phba: Pointer to HBA context object.
4513 *
4514 * This function is called from the worker thread when a mailbox command times out.
4515 * The caller is not required to hold any locks. This function will reset the
4516 * HBA and recover all the pending commands.
4517 **/
4518void
4519lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
4520{
4521 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
04c68496 4522 MAILBOX_t *mb = &pmbox->u.mb;
da0436e9
JS
4523 struct lpfc_sli *psli = &phba->sli;
4524 struct lpfc_sli_ring *pring;
4525
4526 /* Check the pmbox pointer first. There is a race condition
4527 * between the mbox timeout handler getting executed in the
4528 * worklist and the mailbox actually completing. When this
4529 * race condition occurs, the mbox_active will be NULL.
4530 */
4531 spin_lock_irq(&phba->hbalock);
4532 if (pmbox == NULL) {
4533 lpfc_printf_log(phba, KERN_WARNING,
4534 LOG_MBOX | LOG_SLI,
4535 "0353 Active Mailbox cleared - mailbox timeout "
4536 "exiting\n");
4537 spin_unlock_irq(&phba->hbalock);
4538 return;
4539 }
4540
4541 /* Mbox cmd <mbxCommand> timeout */
4542 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4543 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
4544 mb->mbxCommand,
4545 phba->pport->port_state,
4546 phba->sli.sli_flag,
4547 phba->sli.mbox_active);
4548 spin_unlock_irq(&phba->hbalock);
4549
4550 /* Setting state unknown so lpfc_sli_abort_iocb_ring
4551 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
4552 * it to fail all outstanding SCSI IO.
4553 */
4554 spin_lock_irq(&phba->pport->work_port_lock);
4555 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4556 spin_unlock_irq(&phba->pport->work_port_lock);
4557 spin_lock_irq(&phba->hbalock);
4558 phba->link_state = LPFC_LINK_UNKNOWN;
f4b4c68f 4559 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
da0436e9
JS
4560 spin_unlock_irq(&phba->hbalock);
4561
4562 pring = &psli->ring[psli->fcp_ring];
4563 lpfc_sli_abort_iocb_ring(phba, pring);
4564
4565 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4566 "0345 Resetting board due to mailbox timeout\n");
4567
4568 /* Reset the HBA device */
4569 lpfc_reset_hba(phba);
4570}
4571
4572/**
4573 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
4574 * @phba: Pointer to HBA context object.
4575 * @pmbox: Pointer to mailbox object.
4576 * @flag: Flag indicating how the mailbox need to be processed.
4577 *
4578 * This function is called by discovery code and HBA management code
4579 * to submit a mailbox command to firmware with SLI-3 interface spec. This
4580 * function gets the hbalock to protect the data structures.
4581 * The mailbox command can be submitted in polling mode, in which case
4582 * this function will wait in a polling loop for the completion of the
4583 * mailbox.
4584 * If the mailbox is submitted in no_wait mode (not polling) the
4585 * function will submit the command and return immediately without waiting
4586 * for the mailbox completion. The no_wait mode is supported only when the
4587 * HBA is in SLI2/SLI3 mode, with interrupts enabled.
4588 * The SLI interface allows only one mailbox pending at a time. If the
4589 * mailbox is issued in polling mode and there is already a mailbox
4590 * pending, then the function will return an error. If the mailbox is issued
4591 * in NO_WAIT mode and there is a mailbox pending already, the function
4592 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
4593 * The sli layer owns the mailbox object until the completion of the mailbox
4594 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
4595 * return codes the caller owns the mailbox command after the return of
4596 * the function.
e59058c4 4597 **/
3772a991
JS
4598static int
4599lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4600 uint32_t flag)
dea3101e 4601{
dea3101e 4602 MAILBOX_t *mb;
2e0fef85 4603 struct lpfc_sli *psli = &phba->sli;
dea3101e
JB
4604 uint32_t status, evtctr;
4605 uint32_t ha_copy;
4606 int i;
09372820 4607 unsigned long timeout;
dea3101e 4608 unsigned long drvr_flag = 0;
34b02dcd 4609 uint32_t word0, ldata;
dea3101e 4610 void __iomem *to_slim;
58da1ffb
JS
4611 int processing_queue = 0;
4612
4613 spin_lock_irqsave(&phba->hbalock, drvr_flag);
4614 if (!pmbox) {
8568a4d2 4615 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
58da1ffb 4616 /* processing mbox queue from intr_handler */
3772a991
JS
4617 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4618 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4619 return MBX_SUCCESS;
4620 }
58da1ffb 4621 processing_queue = 1;
58da1ffb
JS
4622 pmbox = lpfc_mbox_get(phba);
4623 if (!pmbox) {
4624 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4625 return MBX_SUCCESS;
4626 }
4627 }
dea3101e 4628
ed957684 4629 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
92d7f7b0 4630 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
ed957684 4631 if(!pmbox->vport) {
58da1ffb 4632 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
ed957684 4633 lpfc_printf_log(phba, KERN_ERR,
92d7f7b0 4634 LOG_MBOX | LOG_VPORT,
e8b62011 4635 "1806 Mbox x%x failed. No vport\n",
3772a991 4636 pmbox->u.mb.mbxCommand);
ed957684 4637 dump_stack();
58da1ffb 4638 goto out_not_finished;
ed957684
JS
4639 }
4640 }
4641
8d63f375 4642 /* If the PCI channel is in offline state, do not post mbox. */
58da1ffb
JS
4643 if (unlikely(pci_channel_offline(phba->pcidev))) {
4644 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4645 goto out_not_finished;
4646 }
8d63f375 4647
a257bf90
JS
4648 /* If HBA has a deferred error attention, fail the iocb. */
4649 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
4650 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4651 goto out_not_finished;
4652 }
4653
dea3101e 4654 psli = &phba->sli;
92d7f7b0 4655
3772a991 4656 mb = &pmbox->u.mb;
dea3101e
JB
4657 status = MBX_SUCCESS;
4658
2e0fef85
JS
4659 if (phba->link_state == LPFC_HBA_ERROR) {
4660 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
41415862
JW
4661
4662 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
4663 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4664 "(%d):0311 Mailbox command x%x cannot "
4665 "issue Data: x%x x%x\n",
4666 pmbox->vport ? pmbox->vport->vpi : 0,
4667 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
58da1ffb 4668 goto out_not_finished;
41415862
JW
4669 }
4670
9290831f
JS
4671 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
4672 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2e0fef85 4673 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3772a991
JS
4674 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4675 "(%d):2528 Mailbox command x%x cannot "
4676 "issue Data: x%x x%x\n",
4677 pmbox->vport ? pmbox->vport->vpi : 0,
4678 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
58da1ffb 4679 goto out_not_finished;
9290831f
JS
4680 }
4681
dea3101e
JB
4682 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
4683 /* Polling for a mbox command when another one is already active
4684 * is not allowed in SLI. Also, the driver must have established
4685 * SLI2 mode to queue and process multiple mbox commands.
4686 */
4687
4688 if (flag & MBX_POLL) {
2e0fef85 4689 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e
JB
4690
4691 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
4692 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4693 "(%d):2529 Mailbox command x%x "
4694 "cannot issue Data: x%x x%x\n",
4695 pmbox->vport ? pmbox->vport->vpi : 0,
4696 pmbox->u.mb.mbxCommand,
4697 psli->sli_flag, flag);
58da1ffb 4698 goto out_not_finished;
dea3101e
JB
4699 }
4700
3772a991 4701 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
2e0fef85 4702 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 4703 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
4704 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4705 "(%d):2530 Mailbox command x%x "
4706 "cannot issue Data: x%x x%x\n",
4707 pmbox->vport ? pmbox->vport->vpi : 0,
4708 pmbox->u.mb.mbxCommand,
4709 psli->sli_flag, flag);
58da1ffb 4710 goto out_not_finished;
dea3101e
JB
4711 }
4712
dea3101e
JB
4713 /* Another mailbox command is still being processed, queue this
4714 * command to be processed later.
4715 */
4716 lpfc_mbox_put(phba, pmbox);
4717
4718 /* Mbox cmd issue - BUSY */
ed957684 4719 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 4720 "(%d):0308 Mbox cmd issue - BUSY Data: "
92d7f7b0 4721 "x%x x%x x%x x%x\n",
92d7f7b0
JS
4722 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
4723 mb->mbxCommand, phba->pport->port_state,
4724 psli->sli_flag, flag);
dea3101e
JB
4725
4726 psli->slistat.mbox_busy++;
2e0fef85 4727 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 4728
858c9f6c
JS
4729 if (pmbox->vport) {
4730 lpfc_debugfs_disc_trc(pmbox->vport,
4731 LPFC_DISC_TRC_MBOX_VPORT,
4732 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
4733 (uint32_t)mb->mbxCommand,
4734 mb->un.varWords[0], mb->un.varWords[1]);
4735 }
4736 else {
4737 lpfc_debugfs_disc_trc(phba->pport,
4738 LPFC_DISC_TRC_MBOX,
4739 "MBOX Bsy: cmd:x%x mb:x%x x%x",
4740 (uint32_t)mb->mbxCommand,
4741 mb->un.varWords[0], mb->un.varWords[1]);
4742 }
4743
2e0fef85 4744 return MBX_BUSY;
dea3101e
JB
4745 }
4746
dea3101e
JB
4747 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4748
4749 /* If we are not polling, we MUST be in SLI2 mode */
4750 if (flag != MBX_POLL) {
3772a991 4751 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
41415862 4752 (mb->mbxCommand != MBX_KILL_BOARD)) {
dea3101e 4753 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4754 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea3101e 4755 /* Mbox command <mbxCommand> cannot issue */
3772a991
JS
4756 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4757 "(%d):2531 Mailbox command x%x "
4758 "cannot issue Data: x%x x%x\n",
4759 pmbox->vport ? pmbox->vport->vpi : 0,
4760 pmbox->u.mb.mbxCommand,
4761 psli->sli_flag, flag);
58da1ffb 4762 goto out_not_finished;
dea3101e
JB
4763 }
4764 /* timeout active mbox command */
a309a6b6
JS
4765 mod_timer(&psli->mbox_tmo, (jiffies +
4766 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
dea3101e
JB
4767 }
4768
4769 /* Mailbox cmd <cmd> issue */
ed957684 4770 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
e8b62011 4771 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
92d7f7b0 4772 "x%x\n",
e8b62011 4773 pmbox->vport ? pmbox->vport->vpi : 0,
92d7f7b0
JS
4774 mb->mbxCommand, phba->pport->port_state,
4775 psli->sli_flag, flag);
dea3101e 4776
858c9f6c
JS
4777 if (mb->mbxCommand != MBX_HEARTBEAT) {
4778 if (pmbox->vport) {
4779 lpfc_debugfs_disc_trc(pmbox->vport,
4780 LPFC_DISC_TRC_MBOX_VPORT,
4781 "MBOX Send vport: cmd:x%x mb:x%x x%x",
4782 (uint32_t)mb->mbxCommand,
4783 mb->un.varWords[0], mb->un.varWords[1]);
4784 }
4785 else {
4786 lpfc_debugfs_disc_trc(phba->pport,
4787 LPFC_DISC_TRC_MBOX,
4788 "MBOX Send: cmd:x%x mb:x%x x%x",
4789 (uint32_t)mb->mbxCommand,
4790 mb->un.varWords[0], mb->un.varWords[1]);
4791 }
4792 }
4793
dea3101e
JB
4794 psli->slistat.mbox_cmd++;
4795 evtctr = psli->slistat.mbox_event;
4796
4797 /* next set own bit for the adapter and copy over command word */
4798 mb->mbxOwner = OWN_CHIP;
4799
3772a991 4800 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 4801 /* First copy command data to host SLIM area */
34b02dcd 4802 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e 4803 } else {
9290831f 4804 if (mb->mbxCommand == MBX_CONFIG_PORT) {
dea3101e 4805 /* copy command data into host mbox for cmpl */
34b02dcd 4806 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
dea3101e
JB
4807 }
4808
4809 /* First copy mbox command data to HBA SLIM, skip past first
4810 word */
4811 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4812 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
4813 MAILBOX_CMD_SIZE - sizeof (uint32_t));
4814
4815 /* Next copy over first word, with mbxOwner set */
34b02dcd 4816 ldata = *((uint32_t *)mb);
dea3101e
JB
4817 to_slim = phba->MBslimaddr;
4818 writel(ldata, to_slim);
4819 readl(to_slim); /* flush */
4820
4821 if (mb->mbxCommand == MBX_CONFIG_PORT) {
4822 /* switch over to host mailbox */
3772a991 4823 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea3101e
JB
4824 }
4825 }
4826
4827 wmb();
dea3101e
JB
4828
4829 switch (flag) {
4830 case MBX_NOWAIT:
09372820 4831 /* Set up reference to mailbox command */
dea3101e 4832 psli->mbox_active = pmbox;
09372820
JS
4833 /* Interrupt board to do it */
4834 writel(CA_MBATT, phba->CAregaddr);
4835 readl(phba->CAregaddr); /* flush */
4836 /* Don't wait for it to finish, just return */
dea3101e
JB
4837 break;
4838
4839 case MBX_POLL:
09372820 4840 /* Set up null reference to mailbox command */
dea3101e 4841 psli->mbox_active = NULL;
09372820
JS
4842 /* Interrupt board to do it */
4843 writel(CA_MBATT, phba->CAregaddr);
4844 readl(phba->CAregaddr); /* flush */
4845
3772a991 4846 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 4847 /* First read mbox status word */
34b02dcd 4848 word0 = *((uint32_t *)phba->mbox);
dea3101e
JB
4849 word0 = le32_to_cpu(word0);
4850 } else {
4851 /* First read mbox status word */
4852 word0 = readl(phba->MBslimaddr);
4853 }
4854
4855 /* Read the HBA Host Attention Register */
4856 ha_copy = readl(phba->HAregaddr);
09372820
JS
4857 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
4858 mb->mbxCommand) *
4859 1000) + jiffies;
4860 i = 0;
dea3101e 4861 /* Wait for command to complete */
41415862
JW
4862 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
4863 (!(ha_copy & HA_MBATT) &&
2e0fef85 4864 (phba->link_state > LPFC_WARM_START))) {
09372820 4865 if (time_after(jiffies, timeout)) {
dea3101e 4866 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2e0fef85 4867 spin_unlock_irqrestore(&phba->hbalock,
dea3101e 4868 drvr_flag);
58da1ffb 4869 goto out_not_finished;
dea3101e
JB
4870 }
4871
4872 /* Check if we took a mbox interrupt while we were
4873 polling */
4874 if (((word0 & OWN_CHIP) != OWN_CHIP)
4875 && (evtctr != psli->slistat.mbox_event))
4876 break;
4877
09372820
JS
4878 if (i++ > 10) {
4879 spin_unlock_irqrestore(&phba->hbalock,
4880 drvr_flag);
4881 msleep(1);
4882 spin_lock_irqsave(&phba->hbalock, drvr_flag);
4883 }
dea3101e 4884
3772a991 4885 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 4886 /* First copy command data */
34b02dcd 4887 word0 = *((uint32_t *)phba->mbox);
dea3101e
JB
4888 word0 = le32_to_cpu(word0);
4889 if (mb->mbxCommand == MBX_CONFIG_PORT) {
4890 MAILBOX_t *slimmb;
34b02dcd 4891 uint32_t slimword0;
dea3101e
JB
4892 /* Check real SLIM for any errors */
4893 slimword0 = readl(phba->MBslimaddr);
4894 slimmb = (MAILBOX_t *) & slimword0;
4895 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
4896 && slimmb->mbxStatus) {
4897 psli->sli_flag &=
3772a991 4898 ~LPFC_SLI_ACTIVE;
dea3101e
JB
4899 word0 = slimword0;
4900 }
4901 }
4902 } else {
4903 /* First copy command data */
4904 word0 = readl(phba->MBslimaddr);
4905 }
4906 /* Read the HBA Host Attention Register */
4907 ha_copy = readl(phba->HAregaddr);
4908 }
4909
3772a991 4910 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea3101e 4911 /* copy results back to user */
34b02dcd 4912 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
dea3101e
JB
4913 } else {
4914 /* First copy command data */
4915 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
4916 MAILBOX_CMD_SIZE);
4917 if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
4918 pmbox->context2) {
92d7f7b0 4919 lpfc_memcpy_from_slim((void *)pmbox->context2,
dea3101e
JB
4920 phba->MBslimaddr + DMP_RSP_OFFSET,
4921 mb->un.varDmp.word_cnt);
4922 }
4923 }
4924
4925 writel(HA_MBATT, phba->HAregaddr);
4926 readl(phba->HAregaddr); /* flush */
4927
4928 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4929 status = mb->mbxStatus;
4930 }
4931
2e0fef85
JS
4932 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4933 return status;
58da1ffb
JS
4934
4935out_not_finished:
4936 if (processing_queue) {
da0436e9 4937 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
58da1ffb
JS
4938 lpfc_mbox_cmpl_put(phba, pmbox);
4939 }
4940 return MBX_NOT_FINISHED;
dea3101e
JB
4941}
4942
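/*
 * Editor's illustration (not part of the driver): a minimal caller-side
 * sketch for the routine above.  With MBX_POLL the call spins until the
 * port completes the command and the caller keeps ownership throughout;
 * with MBX_NOWAIT the command completes later through pmb->mbox_cmpl.
 * lpfc_read_rev() stands in here as a representative mailbox-prep helper;
 * error handling is trimmed to the essentials.
 */
static int lpfc_example_poll_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_read_rev(phba, pmb);	/* fill in the mailbox command */

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS || pmb->u.mb.mbxStatus) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* ... consume the READ_REV results in pmb->u.mb here ... */
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}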
f1126688
JS
4943/**
4944 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
4945 * @phba: Pointer to HBA context object.
4946 *
4947 * The function blocks the posting of SLI4 asynchronous mailbox commands from
4948 * the driver internal pending mailbox queue. It will then try to wait out the
4949 * possible outstanding mailbox command before returning.
4950 *
4951 * Returns:
4952 * 0 - the outstanding mailbox command completed.
4953 * 1 - the wait for the outstanding mailbox command timed out.
4954 **/
4955static int
4956lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
4957{
4958 struct lpfc_sli *psli = &phba->sli;
4959 uint8_t actcmd = MBX_HEARTBEAT;
4960 int rc = 0;
4961 unsigned long timeout;
4962
4963 /* Mark the asynchronous mailbox command posting as blocked */
4964 spin_lock_irq(&phba->hbalock);
4965 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
4966 if (phba->sli.mbox_active)
4967 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
4968 spin_unlock_irq(&phba->hbalock);
4969 /* Determine how long we might wait for the active mailbox
4970 * command to be gracefully completed by firmware.
4971 */
4972 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
4973 jiffies;
4974 /* Wait for the outstanding mailbox command to complete */
4975 while (phba->sli.mbox_active) {
4976 /* Check active mailbox complete status every 2ms */
4977 msleep(2);
4978 if (time_after(jiffies, timeout)) {
4979 /* Timeout, mark the outstanding cmd as not complete */
4980 rc = 1;
4981 break;
4982 }
4983 }
4984
4985 /* Cannot cleanly block async mailbox command posting, fail it */
4986 if (rc) {
4987 spin_lock_irq(&phba->hbalock);
4988 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4989 spin_unlock_irq(&phba->hbalock);
4990 }
4991 return rc;
4992}
4993
4994/**
4995 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
4996 * @phba: Pointer to HBA context object.
4997 *
4998 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
4999 * commands from the driver internal pending mailbox queue. It makes sure
5000 * that there is no outstanding mailbox command before resuming posting
5001 * asynchronous mailbox commands. If, for any reason, there is an
5002 * outstanding mailbox command, it will try to wait it out before resuming
5003 * asynchronous mailbox command posting.
5004 **/
5005static void
5006lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
5007{
5008 struct lpfc_sli *psli = &phba->sli;
5009
5010 spin_lock_irq(&phba->hbalock);
5011 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5012 /* Asynchronous mailbox posting is not blocked, do nothing */
5013 spin_unlock_irq(&phba->hbalock);
5014 return;
5015 }
5016
5017 /* The outstanding synchronous mailbox command is guaranteed to be done,
5018 * whether it succeeded or timed out; a timed-out command is always
5019 * removed afterwards, so just unblock posting of async mailbox
5020 * commands and resume
5021 */
5022 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5023 spin_unlock_irq(&phba->hbalock);
5024
5025 /* wake up worker thread to post asynchronous mailbox command */
5026 lpfc_worker_wake_up(phba);
5027}
5028
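/*
 * Editor's illustration: the block/unblock pair above is meant to bracket
 * a synchronous bootstrap-mailbox operation, exactly as the MBX_POLL path
 * of lpfc_sli_issue_mbox_s4() below does.  A trimmed sketch of the pattern
 * (function name is hypothetical):
 */
static int lpfc_example_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	/* Quiesce async posting; a non-zero return means the active
	 * command never completed and we must not proceed.
	 */
	rc = lpfc_sli4_async_mbox_block(phba);
	if (rc)
		return MBXERR_ERROR;
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
	lpfc_sli4_async_mbox_unblock(phba);	/* resume async posting */
	return rc;
}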
da0436e9
JS
5029/**
5030 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5031 * @phba: Pointer to HBA context object.
5032 * @mboxq: Pointer to mailbox object.
5033 *
5034 * The function posts a mailbox to the port. The mailbox is expected
5035 * to be completely filled in and ready for the port to operate on it.
5036 * This routine executes a synchronous completion operation on the
5037 * mailbox by polling for its completion.
5038 *
5039 * The caller must not be holding any locks when calling this routine.
5040 *
5041 * Returns:
5042 * MBX_SUCCESS - mailbox posted successfully
5043 * Any of the MBX error values.
5044 **/
5045static int
5046lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5047{
5048 int rc = MBX_SUCCESS;
5049 unsigned long iflag;
5050 uint32_t db_ready;
5051 uint32_t mcqe_status;
5052 uint32_t mbx_cmnd;
5053 unsigned long timeout;
5054 struct lpfc_sli *psli = &phba->sli;
5055 struct lpfc_mqe *mb = &mboxq->u.mqe;
5056 struct lpfc_bmbx_create *mbox_rgn;
5057 struct dma_address *dma_address;
5058 struct lpfc_register bmbx_reg;
5059
5060 /*
5061 * Only one mailbox can be active to the bootstrap mailbox region
5062 * at a time and there is no queueing provided.
5063 */
5064 spin_lock_irqsave(&phba->hbalock, iflag);
5065 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5066 spin_unlock_irqrestore(&phba->hbalock, iflag);
5067 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5068 "(%d):2532 Mailbox command x%x (x%x) "
5069 "cannot issue Data: x%x x%x\n",
5070 mboxq->vport ? mboxq->vport->vpi : 0,
5071 mboxq->u.mb.mbxCommand,
5072 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5073 psli->sli_flag, MBX_POLL);
5074 return MBXERR_ERROR;
5075 }
5076 /* The server grabs the token and owns it until release */
5077 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5078 phba->sli.mbox_active = mboxq;
5079 spin_unlock_irqrestore(&phba->hbalock, iflag);
5080
5081 /*
5082 * Initialize the bootstrap memory region to avoid stale data areas
5083 * in the mailbox post. Then copy the caller's mailbox contents to
5084 * the bmbx mailbox region.
5085 */
5086 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
5087 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
5088 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
5089 sizeof(struct lpfc_mqe));
5090
5091 /* Post the high mailbox dma address to the port and wait for ready. */
5092 dma_address = &phba->sli4_hba.bmbx.dma_address;
5093 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
5094
5095 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5096 * 1000) + jiffies;
5097 do {
5098 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5099 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5100 if (!db_ready)
5101 msleep(2);
5102
5103 if (time_after(jiffies, timeout)) {
5104 rc = MBXERR_ERROR;
5105 goto exit;
5106 }
5107 } while (!db_ready);
5108
5109 /* Post the low mailbox dma address to the port. */
5110 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
5111 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5112 * 1000) + jiffies;
5113 do {
5114 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5115 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5116 if (!db_ready)
5117 msleep(2);
5118
5119 if (time_after(jiffies, timeout)) {
5120 rc = MBXERR_ERROR;
5121 goto exit;
5122 }
5123 } while (!db_ready);
5124
5125 /*
5126 * Read the CQ to ensure the mailbox has completed.
5127 * If so, update the mailbox status so that the upper layers
5128 * can complete the request normally.
5129 */
5130 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
5131 sizeof(struct lpfc_mqe));
5132 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
5133 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5134 sizeof(struct lpfc_mcqe));
5135 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5136
5137 /* Prefix the mailbox status with range x4000 to note SLI4 status. */
5138 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5139 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5140 rc = MBXERR_ERROR;
5141 }
5142
5143 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5144 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
5145 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
5146 " x%x x%x CQ: x%x x%x x%x x%x\n",
5147 mboxq->vport ? mboxq->vport->vpi : 0,
5148 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
5149 bf_get(lpfc_mqe_status, mb),
5150 mb->un.mb_words[0], mb->un.mb_words[1],
5151 mb->un.mb_words[2], mb->un.mb_words[3],
5152 mb->un.mb_words[4], mb->un.mb_words[5],
5153 mb->un.mb_words[6], mb->un.mb_words[7],
5154 mb->un.mb_words[8], mb->un.mb_words[9],
5155 mb->un.mb_words[10], mb->un.mb_words[11],
5156 mb->un.mb_words[12], mboxq->mcqe.word0,
5157 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5158 mboxq->mcqe.trailer);
5159exit:
5160 /* We are holding the token, no lock needed when releasing it */
5161 spin_lock_irqsave(&phba->hbalock, iflag);
5162 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5163 phba->sli.mbox_active = NULL;
5164 spin_unlock_irqrestore(&phba->hbalock, iflag);
5165 return rc;
5166}
5167
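/*
 * Editor's sketch: the two identical doorbell-ready polling loops above
 * could be factored into a single helper.  This is an editorial
 * refactoring suggestion, not driver code; the helper name is
 * hypothetical, but every identifier it uses appears in the routine above.
 */
static int lpfc_example_bmbx_wait_rdy(struct lpfc_hba *phba, uint32_t mbx_cmnd)
{
	struct lpfc_register bmbx_reg;
	unsigned long timeout;
	uint32_t db_ready;

	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
				   * 1000) + jiffies;
	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			msleep(2);
		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;	/* port never became ready */
	} while (!db_ready);
	return MBX_SUCCESS;
}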
5168/**
5169 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
5170 * @phba: Pointer to HBA context object.
5171 * @pmbox: Pointer to mailbox object.
5172 * @flag: Flag indicating how the mailbox needs to be processed.
5173 *
5174 * This function is called by discovery code and HBA management code to submit
5175 * a mailbox command to firmware with SLI-4 interface spec.
5176 *
5177 * Return codes: the caller owns the mailbox command after the function
5178 * returns.
5179 **/
5180static int
5181lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5182 uint32_t flag)
5183{
5184 struct lpfc_sli *psli = &phba->sli;
5185 unsigned long iflags;
5186 int rc;
5187
8fa38513
JS
5188 rc = lpfc_mbox_dev_check(phba);
5189 if (unlikely(rc)) {
5190 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5191 "(%d):2544 Mailbox command x%x (x%x) "
5192 "cannot issue Data: x%x x%x\n",
5193 mboxq->vport ? mboxq->vport->vpi : 0,
5194 mboxq->u.mb.mbxCommand,
5195 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5196 psli->sli_flag, flag);
5197 goto out_not_finished;
5198 }
5199
da0436e9
JS
5200 /* Detect polling mode and jump to a handler */
5201 if (!phba->sli4_hba.intr_enable) {
5202 if (flag == MBX_POLL)
5203 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5204 else
5205 rc = -EIO;
5206 if (rc != MBX_SUCCESS)
5207 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5208 "(%d):2541 Mailbox command x%x "
5209 "(x%x) cannot issue Data: x%x x%x\n",
5210 mboxq->vport ? mboxq->vport->vpi : 0,
5211 mboxq->u.mb.mbxCommand,
5212 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5213 psli->sli_flag, flag);
5214 return rc;
5215 } else if (flag == MBX_POLL) {
f1126688
JS
5216 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5217 "(%d):2542 Try to issue mailbox command "
5218 "x%x (x%x) synchronously ahead of async"
5219 "mailbox command queue: x%x x%x\n",
da0436e9
JS
5220 mboxq->vport ? mboxq->vport->vpi : 0,
5221 mboxq->u.mb.mbxCommand,
5222 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5223 psli->sli_flag, flag);
f1126688
JS
5224 /* Try to block the asynchronous mailbox posting */
5225 rc = lpfc_sli4_async_mbox_block(phba);
5226 if (!rc) {
5227 /* Successfully blocked, now issue sync mbox cmd */
5228 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5229 if (rc != MBX_SUCCESS)
5230 lpfc_printf_log(phba, KERN_ERR,
5231 LOG_MBOX | LOG_SLI,
5232 "(%d):2597 Mailbox command "
5233 "x%x (x%x) cannot issue "
5234 "Data: x%x x%x\n",
5235 mboxq->vport ?
5236 mboxq->vport->vpi : 0,
5237 mboxq->u.mb.mbxCommand,
5238 lpfc_sli4_mbox_opcode_get(phba,
5239 mboxq),
5240 psli->sli_flag, flag);
5241 /* Unblock the async mailbox posting afterward */
5242 lpfc_sli4_async_mbox_unblock(phba);
5243 }
5244 return rc;
da0436e9
JS
5245 }
5246
5247 /* Now, interrupt mode asynchronous mailbox command */
5248 rc = lpfc_mbox_cmd_check(phba, mboxq);
5249 if (rc) {
5250 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5251 "(%d):2543 Mailbox command x%x (x%x) "
5252 "cannot issue Data: x%x x%x\n",
5253 mboxq->vport ? mboxq->vport->vpi : 0,
5254 mboxq->u.mb.mbxCommand,
5255 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5256 psli->sli_flag, flag);
5257 goto out_not_finished;
5258 }
da0436e9
JS
5259
5260 /* Put the mailbox command to the driver internal FIFO */
5261 psli->slistat.mbox_busy++;
5262 spin_lock_irqsave(&phba->hbalock, iflags);
5263 lpfc_mbox_put(phba, mboxq);
5264 spin_unlock_irqrestore(&phba->hbalock, iflags);
5265 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5266 "(%d):0354 Mbox cmd issue - Enqueue Data: "
5267 "x%x (x%x) x%x x%x x%x\n",
5268 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
5269 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5270 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5271 phba->pport->port_state,
5272 psli->sli_flag, MBX_NOWAIT);
5273 /* Wake up worker thread to transport mailbox command from head */
5274 lpfc_worker_wake_up(phba);
5275
5276 return MBX_BUSY;
5277
5278out_not_finished:
5279 return MBX_NOT_FINISHED;
5280}
5281
5282/**
5283 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
5284 * @phba: Pointer to HBA context object.
5285 *
5286 * This function is called by the worker thread to send a mailbox command to
5287 * SLI4 HBA firmware.
5288 *
5289 **/
5290int
5291lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
5292{
5293 struct lpfc_sli *psli = &phba->sli;
5294 LPFC_MBOXQ_t *mboxq;
5295 int rc = MBX_SUCCESS;
5296 unsigned long iflags;
5297 struct lpfc_mqe *mqe;
5298 uint32_t mbx_cmnd;
5299
5300 /* Check interrupt mode before posting async mailbox command */
5301 if (unlikely(!phba->sli4_hba.intr_enable))
5302 return MBX_NOT_FINISHED;
5303
5304 /* Check for mailbox command service token */
5305 spin_lock_irqsave(&phba->hbalock, iflags);
5306 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5307 spin_unlock_irqrestore(&phba->hbalock, iflags);
5308 return MBX_NOT_FINISHED;
5309 }
5310 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5311 spin_unlock_irqrestore(&phba->hbalock, iflags);
5312 return MBX_NOT_FINISHED;
5313 }
5314 if (unlikely(phba->sli.mbox_active)) {
5315 spin_unlock_irqrestore(&phba->hbalock, iflags);
5316 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5317 "0384 There is pending active mailbox cmd\n");
5318 return MBX_NOT_FINISHED;
5319 }
5320 /* Take the mailbox command service token */
5321 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5322
5323 /* Get the next mailbox command from head of queue */
5324 mboxq = lpfc_mbox_get(phba);
5325
5326 /* If no more mailbox commands are waiting to be posted, we're done */
5327 if (!mboxq) {
5328 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5329 spin_unlock_irqrestore(&phba->hbalock, iflags);
5330 return MBX_SUCCESS;
5331 }
5332 phba->sli.mbox_active = mboxq;
5333 spin_unlock_irqrestore(&phba->hbalock, iflags);
5334
5335 /* Check device readiness for posting mailbox command */
5336 rc = lpfc_mbox_dev_check(phba);
5337 if (unlikely(rc))
5338 /* Driver clean routine will clean up pending mailbox */
5339 goto out_not_finished;
5340
5341 /* Prepare the mbox command to be posted */
5342 mqe = &mboxq->u.mqe;
5343 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
5344
5345 /* Start timer for the mbox_tmo and log some mailbox post messages */
5346 mod_timer(&psli->mbox_tmo, (jiffies +
5347 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
5348
5349 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5350 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
5351 "x%x x%x\n",
5352 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
5353 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5354 phba->pport->port_state, psli->sli_flag);
5355
5356 if (mbx_cmnd != MBX_HEARTBEAT) {
5357 if (mboxq->vport) {
5358 lpfc_debugfs_disc_trc(mboxq->vport,
5359 LPFC_DISC_TRC_MBOX_VPORT,
5360 "MBOX Send vport: cmd:x%x mb:x%x x%x",
5361 mbx_cmnd, mqe->un.mb_words[0],
5362 mqe->un.mb_words[1]);
5363 } else {
5364 lpfc_debugfs_disc_trc(phba->pport,
5365 LPFC_DISC_TRC_MBOX,
5366 "MBOX Send: cmd:x%x mb:x%x x%x",
5367 mbx_cmnd, mqe->un.mb_words[0],
5368 mqe->un.mb_words[1]);
5369 }
5370 }
5371 psli->slistat.mbox_cmd++;
5372
5373 /* Post the mailbox command to the port */
5374 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
5375 if (rc != MBX_SUCCESS) {
5376 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5377 "(%d):2533 Mailbox command x%x (x%x) "
5378 "cannot issue Data: x%x x%x\n",
5379 mboxq->vport ? mboxq->vport->vpi : 0,
5380 mboxq->u.mb.mbxCommand,
5381 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5382 psli->sli_flag, MBX_NOWAIT);
5383 goto out_not_finished;
5384 }
5385
5386 return rc;
5387
5388out_not_finished:
5389 spin_lock_irqsave(&phba->hbalock, iflags);
5390 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
5391 __lpfc_mbox_cmpl_put(phba, mboxq);
5392 /* Release the token */
5393 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5394 phba->sli.mbox_active = NULL;
5395 spin_unlock_irqrestore(&phba->hbalock, iflags);
5396
5397 return MBX_NOT_FINISHED;
5398}
5399
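/*
 * Editor's note: lpfc_sli4_post_async_mbox() is intended to run in the
 * worker thread, woken through lpfc_worker_wake_up() each time a command
 * is enqueued.  A trimmed, hypothetical view of that call site (the real
 * worker loop services many other events as well):
 */
static void lpfc_example_worker_mbox_pass(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		(void) lpfc_sli4_post_async_mbox(phba);
}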
5400/**
5401 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
5402 * @phba: Pointer to HBA context object.
5403 * @pmbox: Pointer to mailbox object.
5404 * @flag: Flag indicating how the mailbox needs to be processed.
5405 *
5406 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine,
5407 * invoked via the API jump table function pointer in the lpfc_hba struct.
5408 *
5409 * Return codes: the caller owns the mailbox command after the function
5410 * returns.
5411 **/
5412int
5413lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
5414{
5415 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
5416}
5417
5418/**
5419 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
5420 * @phba: The hba struct for which this call is being executed.
5421 * @dev_grp: The HBA PCI-Device group number.
5422 *
5423 * This routine sets up the mbox interface API function jump table in @phba
5424 * struct.
5425 * Returns: 0 - success, -ENODEV - failure.
5426 **/
5427int
5428lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5429{
5430
5431 switch (dev_grp) {
5432 case LPFC_PCI_DEV_LP:
5433 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
5434 phba->lpfc_sli_handle_slow_ring_event =
5435 lpfc_sli_handle_slow_ring_event_s3;
5436 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
5437 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
5438 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
5439 break;
5440 case LPFC_PCI_DEV_OC:
5441 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
5442 phba->lpfc_sli_handle_slow_ring_event =
5443 lpfc_sli_handle_slow_ring_event_s4;
5444 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
5445 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
5446 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
5447 break;
5448 default:
5449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5450 "1420 Invalid HBA PCI-device group: 0x%x\n",
5451 dev_grp);
5452 return -ENODEV;
5453 break;
5454 }
5455 return 0;
5456}
5457
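/*
 * Editor's illustration: during probe the driver selects the device group
 * and installs the jump table; every later mailbox issue then dispatches
 * through the pointer regardless of SLI revision.  A trimmed sketch
 * (function name is hypothetical; surrounding probe logic omitted):
 */
static int lpfc_example_setup_and_issue(struct lpfc_hba *phba,
					LPFC_MBOXQ_t *pmbox)
{
	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
		return -ENODEV;
	/* resolves to lpfc_sli_issue_mbox_s4() for LPFC_PCI_DEV_OC */
	return lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
}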
e59058c4 5458/**
3621a710 5459 * __lpfc_sli_ringtx_put - Add an iocb to the txq
e59058c4
JS
5460 * @phba: Pointer to HBA context object.
5461 * @pring: Pointer to driver SLI ring object.
5462 * @piocb: Pointer to address of newly added command iocb.
5463 *
5464 * This function is called with hbalock held to add a command
5465 * iocb to the txq when the SLI layer cannot submit the command iocb
5466 * to the ring.
5467 **/
858c9f6c 5468static void
92d7f7b0 5469__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 5470 struct lpfc_iocbq *piocb)
dea3101e
JB
5471{
5472 /* Insert the caller's iocb in the txq tail for later processing. */
5473 list_add_tail(&piocb->list, &pring->txq);
5474 pring->txq_cnt++;
dea3101e
JB
5475}
5476
e59058c4 5477/**
3621a710 5478 * lpfc_sli_next_iocb - Get the next iocb in the txq
e59058c4
JS
5479 * @phba: Pointer to HBA context object.
5480 * @pring: Pointer to driver SLI ring object.
5481 * @piocb: Pointer to address of newly added command iocb.
5482 *
5483 * This function is called with hbalock held before a new
5484 * iocb is submitted to the firmware. This function checks the
5485 * txq so that any iocbs queued there are flushed to the firmware
5486 * before new iocbs are submitted.
5487 * If there are iocbs in the txq which need to be submitted
5488 * to firmware, lpfc_sli_next_iocb returns the first element
5489 * of the txq after dequeuing it from txq.
5490 * If there is no iocb in the txq then the function will return
5491 * *piocb and *piocb is set to NULL. Caller needs to check
5492 * *piocb to find if there are more commands in the txq.
5493 **/
dea3101e
JB
5494static struct lpfc_iocbq *
5495lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2e0fef85 5496 struct lpfc_iocbq **piocb)
dea3101e
JB
5497{
5498 struct lpfc_iocbq * nextiocb;
5499
5500 nextiocb = lpfc_sli_ringtx_get(phba, pring);
5501 if (!nextiocb) {
5502 nextiocb = *piocb;
5503 *piocb = NULL;
5504 }
5505
5506 return nextiocb;
5507}
5508
e59058c4 5509/**
3772a991 5510 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
e59058c4 5511 * @phba: Pointer to HBA context object.
3772a991 5512 * @ring_number: SLI ring number to issue iocb on.
e59058c4
JS
5513 * @piocb: Pointer to command iocb.
5514 * @flag: Flag indicating if this command can be put into txq.
5515 *
3772a991
JS
5516 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
5517 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
5518 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
5519 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
5520 * this function allows only iocbs for posting buffers. This function finds
5521 * next available slot in the command ring and posts the command to the
5522 * available slot and writes the port attention register to request HBA start
5523 * processing the new iocb. If there is no slot available in the ring and
5524 * the SLI_IOCB_RET_IOCB flag is not set, the new iocb is added to the txq;
5525 * otherwise the function returns IOCB_BUSY.
e59058c4 5526 *
3772a991
JS
5527 * This function is called with hbalock held. The function will return success
5528 * after it successfully submits the iocb to firmware or after adding it
5529 * to the txq.
e59058c4 5530 **/
98c9ea5c 5531static int
3772a991 5532__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea3101e
JB
5533 struct lpfc_iocbq *piocb, uint32_t flag)
5534{
5535 struct lpfc_iocbq *nextiocb;
5536 IOCB_t *iocb;
3772a991 5537 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
dea3101e 5538
92d7f7b0
JS
5539 if (piocb->iocb_cmpl && (!piocb->vport) &&
5540 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
5541 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
5542 lpfc_printf_log(phba, KERN_ERR,
5543 LOG_SLI | LOG_VPORT,
e8b62011 5544 "1807 IOCB x%x failed. No vport\n",
92d7f7b0
JS
5545 piocb->iocb.ulpCommand);
5546 dump_stack();
5547 return IOCB_ERROR;
5548 }
5549
5550
8d63f375
LV
5551 /* If the PCI channel is in offline state, do not post iocbs. */
5552 if (unlikely(pci_channel_offline(phba->pcidev)))
5553 return IOCB_ERROR;
5554
a257bf90
JS
5555 /* If HBA has a deferred error attention, fail the iocb. */
5556 if (unlikely(phba->hba_flag & DEFER_ERATT))
5557 return IOCB_ERROR;
5558
dea3101e
JB
5559 /*
5560 * We should never get an IOCB if we are in a < LINK_DOWN state
5561 */
2e0fef85 5562 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea3101e
JB
5563 return IOCB_ERROR;
5564
5565 /*
5566 * Check to see if we are blocking IOCB processing because of an
0b727fea 5567 * outstanding event.
dea3101e 5568 */
0b727fea 5569 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea3101e
JB
5570 goto iocb_busy;
5571
2e0fef85 5572 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea3101e 5573 /*
2680eeaa 5574 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea3101e
JB
5575 * can be issued if the link is not up.
5576 */
5577 switch (piocb->iocb.ulpCommand) {
84774a4d
JS
5578 case CMD_GEN_REQUEST64_CR:
5579 case CMD_GEN_REQUEST64_CX:
5580 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
5581 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
6a9c52cf 5582 FC_RCTL_DD_UNSOL_CMD) ||
84774a4d
JS
5583 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
5584 MENLO_TRANSPORT_TYPE))
5585
5586 goto iocb_busy;
5587 break;
dea3101e
JB
5588 case CMD_QUE_RING_BUF_CN:
5589 case CMD_QUE_RING_BUF64_CN:
dea3101e
JB
5590 /*
5591 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
5592 * completion, iocb_cmpl MUST be 0.
5593 */
5594 if (piocb->iocb_cmpl)
5595 piocb->iocb_cmpl = NULL;
5596 /*FALLTHROUGH*/
5597 case CMD_CREATE_XRI_CR:
2680eeaa
JS
5598 case CMD_CLOSE_XRI_CN:
5599 case CMD_CLOSE_XRI_CX:
dea3101e
JB
5600 break;
5601 default:
5602 goto iocb_busy;
5603 }
5604
5605 /*
5606 * For FCP commands, we must be in a state where we can process link
5607 * attention events.
5608 */
5609 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
92d7f7b0 5610 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea3101e 5611 goto iocb_busy;
92d7f7b0 5612 }
dea3101e 5613
dea3101e
JB
5614 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
5615 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
5616 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
5617
5618 if (iocb)
5619 lpfc_sli_update_ring(phba, pring);
5620 else
5621 lpfc_sli_update_full_ring(phba, pring);
5622
5623 if (!piocb)
5624 return IOCB_SUCCESS;
5625
5626 goto out_busy;
5627
5628 iocb_busy:
5629 pring->stats.iocb_cmd_delay++;
5630
5631 out_busy:
5632
5633 if (!(flag & SLI_IOCB_RET_IOCB)) {
92d7f7b0 5634 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea3101e
JB
5635 return IOCB_SUCCESS;
5636 }
5637
5638 return IOCB_BUSY;
5639}
5640
3772a991 5641/**
4f774513
JS
5642 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
5643 * @phba: Pointer to HBA context object.
5644 * @piocbq: Pointer to command iocb.
5645 * @sglq: Pointer to the scatter gather queue object.
5646 *
5647 * This routine converts the bpl or bde that is in the IOCB
5648 * to a sgl list for the sli4 hardware. The physical address
5649 * of the bpl/bde is converted back to a virtual address.
5650 * If the IOCB contains a BPL then the list of BDE's is
5651 * converted to sli4_sge's. If the IOCB contains a single
5652 * BDE then it is converted to a single sli_sge.
5653 * The IOCB is still in cpu endianness so the contents of
5654 * the bpl can be used without byte swapping.
5655 *
5656 * Returns valid XRI = Success, NO_XRI = Failure.
5657**/
5658static uint16_t
5659lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5660 struct lpfc_sglq *sglq)
3772a991 5661{
4f774513
JS
5662 uint16_t xritag = NO_XRI;
5663 struct ulp_bde64 *bpl = NULL;
5664 struct ulp_bde64 bde;
5665 struct sli4_sge *sgl = NULL;
5666 IOCB_t *icmd;
5667 int numBdes = 0;
5668 int i = 0;
3772a991 5669
4f774513
JS
5670 if (!piocbq || !sglq)
5671 return xritag;
5672
5673 sgl = (struct sli4_sge *)sglq->sgl;
5674 icmd = &piocbq->iocb;
5675 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5676 numBdes = icmd->un.genreq64.bdl.bdeSize /
5677 sizeof(struct ulp_bde64);
5678 /* The addrHigh and addrLow fields within the IOCB
5679 * have not been byteswapped yet so there is no
5680 * need to swap them back.
5681 */
5682 bpl = (struct ulp_bde64 *)
5683 ((struct lpfc_dmabuf *)piocbq->context3)->virt;
5684
5685 if (!bpl)
5686 return xritag;
5687
5688 for (i = 0; i < numBdes; i++) {
5689 /* Should already be byte swapped. */
5690 sgl->addr_hi = bpl->addrHigh;
5691 sgl->addr_lo = bpl->addrLow;
5692 /* swap the size field back to the cpu so we
5693 * can assign it to the sgl.
5694 */
5695 bde.tus.w = le32_to_cpu(bpl->tus.w);
5696 bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5697 if ((i+1) == numBdes)
5698 bf_set(lpfc_sli4_sge_last, sgl, 1);
5699 else
5700 bf_set(lpfc_sli4_sge_last, sgl, 0);
5701 sgl->word2 = cpu_to_le32(sgl->word2);
5702 sgl->word3 = cpu_to_le32(sgl->word3);
5703 bpl++;
5704 sgl++;
5705 }
5706 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
5707 /* The addrHigh and addrLow fields of the BDE have not
5708 * been byteswapped yet so they need to be swapped
5709 * before putting them in the sgl.
5710 */
5711 sgl->addr_hi =
5712 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5713 sgl->addr_lo =
5714 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5715 bf_set(lpfc_sli4_sge_len, sgl,
5716 icmd->un.genreq64.bdl.bdeSize);
5717 bf_set(lpfc_sli4_sge_last, sgl, 1);
5718 sgl->word2 = cpu_to_le32(sgl->word2);
5719 sgl->word3 = cpu_to_le32(sgl->word3);
5720 }
5721 return sglq->sli4_xritag;
3772a991 5722}
92d7f7b0 5723
e59058c4 5724/**
4f774513 5725 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
e59058c4 5726 * @phba: Pointer to HBA context object.
e59058c4 5727 *
4f774513 5728 * This routine performs round-robin distribution of SCSI commands across
8fa38513
JS
5729 * the SLI4 FCP WQ indexes. This is called by __lpfc_sli_issue_iocb_s4() with
5730 * the hbalock held.
4f774513
JS
5731 *
5732 * Return: index into SLI4 fast-path FCP queue index.
e59058c4 5733 **/
4f774513 5734static uint32_t
8fa38513 5735lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
92d7f7b0 5736{
8fa38513
JS
5737 ++phba->fcp_qidx;
5738 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
5739 phba->fcp_qidx = 0;
92d7f7b0 5740
8fa38513 5741 return phba->fcp_qidx;
92d7f7b0
JS
5742}
5743
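/*
 * Editor's note: the increment-and-wrap above is equivalent to a modulo
 * step, assuming cfg_fcp_wq_count is non-zero and does not change while
 * I/O is in flight.  Hypothetical stand-alone form:
 */
static uint32_t example_wq_rr_next(uint32_t *qidx, uint32_t wq_count)
{
	*qidx = (*qidx + 1) % wq_count;	/* round-robin across WQ indexes */
	return *qidx;
}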
e59058c4 5744/**
4f774513 5745 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
e59058c4 5746 * @phba: Pointer to HBA context object.
4f774513
JS
5747 * @iocbq: Pointer to command iocb.
5748 * @wqe: Pointer to the work queue entry.
e59058c4 5749 *
4f774513
JS
5750 * This routine converts the iocb command to its Work Queue Entry
5751 * equivalent. The wqe pointer should not have any fields set when
5752 * this routine is called because it will memcpy over them.
5753 * This routine does not set the CQ_ID or the WQEC bits in the
5754 * wqe.
e59058c4 5755 *
4f774513 5756 * Returns: 0 = Success, IOCB_ERROR = Failure.
e59058c4 5757 **/
cf5bf97e 5758static int
4f774513
JS
5759lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5760 union lpfc_wqe *wqe)
cf5bf97e 5761{
5ffc266e 5762 uint32_t xmit_len = 0, total_len = 0;
4f774513
JS
5763 uint8_t ct = 0;
5764 uint32_t fip;
5765 uint32_t abort_tag;
5766 uint8_t command_type = ELS_COMMAND_NON_FIP;
5767 uint8_t cmnd;
5768 uint16_t xritag;
5769 struct ulp_bde64 *bpl = NULL;
c868595d 5770 uint32_t els_id = ELS_ID_DEFAULT;
5ffc266e
JS
5771 int numBdes, i;
5772 struct ulp_bde64 bde;
4f774513 5773
45ed1190 5774 fip = phba->hba_flag & HBA_FIP_SUPPORT;
4f774513 5775 /* The fcp commands will set command type */
0c287589 5776 if (iocbq->iocb_flag & LPFC_IO_FCP)
4f774513 5777 command_type = FCP_COMMAND;
c868595d 5778 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
0c287589
JS
5779 command_type = ELS_COMMAND_FIP;
5780 else
5781 command_type = ELS_COMMAND_NON_FIP;
5782
4f774513
JS
5783 /* Some of the fields are in the right position already */
5784 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5785 abort_tag = (uint32_t) iocbq->iotag;
5786 xritag = iocbq->sli4_xritag;
5787 wqe->words[7] = 0; /* The ct field has moved so reset */
5788 /* words0-2 bpl convert bde */
5789 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5ffc266e
JS
5790 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
5791 sizeof(struct ulp_bde64);
4f774513
JS
5792 bpl = (struct ulp_bde64 *)
5793 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5794 if (!bpl)
5795 return IOCB_ERROR;
cf5bf97e 5796
4f774513
JS
5797 /* Should already be byte swapped. */
5798 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
5799 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
5800 /* swap the size field back to the cpu so we
5801 * can assign it to the sgl.
5802 */
5803 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5ffc266e
JS
5804 xmit_len = wqe->generic.bde.tus.f.bdeSize;
5805 total_len = 0;
5806 for (i = 0; i < numBdes; i++) {
5807 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
5808 total_len += bde.tus.f.bdeSize;
5809 }
4f774513 5810 } else
5ffc266e 5811 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
cf5bf97e 5812
4f774513
JS
5813 iocbq->iocb.ulpIoTag = iocbq->iotag;
5814 cmnd = iocbq->iocb.ulpCommand;
a4bc3379 5815
4f774513
JS
5816 switch (iocbq->iocb.ulpCommand) {
5817 case CMD_ELS_REQUEST64_CR:
5818 if (!iocbq->iocb.ulpLe) {
5819 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5820 "2007 Only Limited Edition cmd Format"
5821 " supported 0x%x\n",
5822 iocbq->iocb.ulpCommand);
5823 return IOCB_ERROR;
5824 }
5ffc266e 5825 wqe->els_req.payload_len = xmit_len;
4f774513
JS
5826 /* Els_request64 has a TMO */
5827 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5828 iocbq->iocb.ulpTimeout);
5829 /* Need a VF for word 4 set the vf bit*/
5830 bf_set(els_req64_vf, &wqe->els_req, 0);
5831 /* And a VFID for word 12 */
5832 bf_set(els_req64_vfid, &wqe->els_req, 0);
5833 /*
5834 * Set ct field to 3, indicating that the context_tag field
5835 * contains the FCFI and remote N_Port_ID is
5836 * in word 5.
5837 */
5838
5839 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5840 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5841 iocbq->iocb.ulpContext);
5842
4f774513
JS
5843 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5844 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5845 /* CCP CCPE PV PRI in word10 were set in the memcpy */
c868595d
JS
5846
5847 if (command_type == ELS_COMMAND_FIP) {
5848 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
5849 >> LPFC_FIP_ELS_ID_SHIFT);
5850 }
5851 bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
5852
4f774513 5853 break;
5ffc266e
JS
5854 case CMD_XMIT_SEQUENCE64_CX:
5855 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5856 iocbq->iocb.un.ulpWord[3]);
5857 wqe->generic.word3 = 0;
5858 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5ffc266e
JS
5859 /* The entire sequence is transmitted for this IOCB */
5860 xmit_len = total_len;
5861 cmnd = CMD_XMIT_SEQUENCE64_CR; /* fall through to the _CR case */
4f774513
JS
5862 case CMD_XMIT_SEQUENCE64_CR:
5863 /* word3 iocb=io_tag32 wqe=payload_offset */
5864 /* payload offset used for multiple outstanding
5865 * sequences on the same exchange
5866 */
5867 wqe->words[3] = 0;
5868 /* word4 relative_offset memcpy */
5869 /* word5 r_ctl/df_ctl memcpy */
5870 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5ffc266e
JS
5871 wqe->xmit_sequence.xmit_len = xmit_len;
5872 command_type = OTHER_COMMAND;
4f774513
JS
5873 break;
5874 case CMD_XMIT_BCAST64_CN:
5875 /* word3 iocb=iotag32 wqe=payload_len */
5876 wqe->words[3] = 0; /* no definition for this in wqe */
5877 /* word4 iocb=rsvd wqe=rsvd */
5878 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
5879 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
5880 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5881 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5882 break;
5883 case CMD_FCP_IWRITE64_CR:
5884 command_type = FCP_COMMAND_DATA_OUT;
5885 /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
5886 * confusing.
5887 * word3 is payload_len: byte offset to the sgl entry for the
5888 * fcp_command.
5889 * word4 is total xfer len, same as the IOCB->ulpParameter.
5890 * word5 is initial xfer len 0 = wait for xfer-ready
5891 */
5892
5893 /* Always wait for xfer-ready before sending data */
5894 wqe->fcp_iwrite.initial_xfer_len = 0;
5895 /* word 4 (xfer length) should have been set on the memcpy */
5896
5897 /* allow write to fall through to read */
5898 case CMD_FCP_IREAD64_CR:
5899 /* FCP_CMD is always the 1st sgl entry */
5900 wqe->fcp_iread.payload_len =
5ffc266e 5901 xmit_len + sizeof(struct fcp_rsp);
4f774513
JS
5902
5903 /* word 4 (xfer length) should have been set on the memcpy */
5904
5905 bf_set(lpfc_wqe_gen_erp, &wqe->generic,
5906 iocbq->iocb.ulpFCP2Rcvy);
5907 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
5908 /* The XC bit and the XS bit are similar. The driver never
5909 * tracked whether or not the exchange was previously open.
5910 * XC = Exchange create, 0 is create. 1 is already open.
5911 * XS = link cmd: 1 do not close the exchange after command.
5912 * XS = 0 close exchange when command completes.
5913 * The only time we would not set the XC bit is when the XS bit
5914 * is set and we are sending our 2nd or greater command on
5915 * this exchange.
5916 */
f1126688
JS
5917 /* Always open the exchange */
5918 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
4f774513 5919
f1126688
JS
5920 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5921 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5922 break;
4f774513
JS
5923 case CMD_FCP_ICMND64_CR:
5924 /* Always open the exchange */
5925 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5926
f1126688 5927 wqe->words[4] = 0;
4f774513 5928 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
f1126688 5929 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
4f774513
JS
5930 break;
5931 case CMD_GEN_REQUEST64_CR:
5932 /* word3 command length is described as byte offset to the
5933 * rsp_data. Would always be 16, sizeof(struct sli4_sge)
5934 * sgl[0] = cmnd
5935 * sgl[1] = rsp.
5936 *
5937 */
5ffc266e 5938 wqe->gen_req.command_len = xmit_len;
4f774513
JS
5939 /* Word4 parameter copied in the memcpy */
5940 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
5941 /* word6 context tag copied in memcpy */
5942 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
5943 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5944 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5945 "2015 Invalid CT %x command 0x%x\n",
5946 ct, iocbq->iocb.ulpCommand);
5947 return IOCB_ERROR;
5948 }
5949 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
5950 bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
5951 iocbq->iocb.ulpTimeout);
5952
5953 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5954 command_type = OTHER_COMMAND;
5955 break;
5956 case CMD_XMIT_ELS_RSP64_CX:
5957 /* words0-2 BDE memcpy */
5958 /* word3 iocb=iotag32 wqe=rsvd */
5959 wqe->words[3] = 0;
5960 /* word4 iocb=did wge=rsvd. */
5961 wqe->words[4] = 0;
5962 /* word5 iocb=rsvd wge=did */
5963 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
5964 iocbq->iocb.un.elsreq64.remoteID);
5965
5966 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5967 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5968
5969 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5970 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5971 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
5972 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5973 iocbq->vport->vpi + phba->vpi_base);
5974 command_type = OTHER_COMMAND;
5975 break;
5976 case CMD_CLOSE_XRI_CN:
5977 case CMD_ABORT_XRI_CN:
5978 case CMD_ABORT_XRI_CX:
5979 /* words 0-2 memcpy should be 0 (reserved) */
5980 /* port will send abts */
5981 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
5982 /*
5983 * The link is down so the fw does not need to send abts
5984 * on the wire.
5985 */
5986 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
5987 else
5988 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
5989 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
5990 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5991 wqe->words[5] = 0;
5992 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5993 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5994 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5995 wqe->generic.abort_tag = abort_tag;
5996 /*
5997 * The abort handler will send us CMD_ABORT_XRI_CN or
5998 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
5999 */
6000 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
6001 cmnd = CMD_ABORT_XRI_CX;
6002 command_type = OTHER_COMMAND;
6003 xritag = 0;
6004 break;
6669f9bb
JS
6005 case CMD_XMIT_BLS_RSP64_CX:
6006 /* As BLS ABTS-ACC WQE is very different from other WQEs,
6007 * we re-construct this WQE here based on information in
6008 * iocbq from scratch.
6009 */
6010 memset(wqe, 0, sizeof(union lpfc_wqe));
5ffc266e 6011 /* OX_ID is invariable to who sent ABTS to CT exchange */
6669f9bb 6012 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
5ffc266e
JS
6013 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
6014 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
6015 LPFC_ABTS_UNSOL_INT) {
6016 /* ABTS sent by initiator to CT exchange, the
6017 * RX_ID field will be filled with the newly
6018 * allocated responder XRI.
6019 */
6020 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
6021 iocbq->sli4_xritag);
6022 } else {
6023 /* ABTS sent by responder to CT exchange, the
6024 * RX_ID field will be filled with the responder
6025 * RX_ID from ABTS.
6026 */
6027 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
6028 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
6029 }
6669f9bb
JS
6030 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
6031 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6032 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6033 iocbq->iocb.ulpContext);
6034 /* Overwrite the pre-set command type with OTHER_COMMAND */
6035 command_type = OTHER_COMMAND;
6036 break;
4f774513
JS
6037 case CMD_XRI_ABORTED_CX:
6038 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
6039 /* words0-2 are all 0's no bde */
6040 /* word3 and word4 are reserved */
6041 wqe->words[3] = 0;
6042 wqe->words[4] = 0;
6043 /* word5 iocb=rsvd wge=did */
6044 /* There is no remote port id in the IOCB? */
6045 /* Let this fall through and fail */
6046 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
6047 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
6048 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
6049 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
6050 default:
6051 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6052 "2014 Invalid command 0x%x\n",
6053 iocbq->iocb.ulpCommand);
6054 return IOCB_ERROR;
6055 break;
6056
6057 }
6058 bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
6059 bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
6060 wqe->generic.abort_tag = abort_tag;
6061 bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
6062 bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
6063 bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
6064 bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
6065
6066 return 0;
6067}
6068
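/*
 * Editor's note: the bf_set()/bf_get() calls above are shift-and-mask
 * bit-field accessors built from <name>_SHIFT/_MASK/_WORD definitions
 * (see lpfc_hw4.h).  A simplified model of what a bf_set() expands to,
 * for illustration only:
 *
 *	word &= ~(name_MASK << name_SHIFT);
 *	word |= ((value & name_MASK) << name_SHIFT);
 */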
6069/**
6070 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
6071 * @phba: Pointer to HBA context object.
6072 * @ring_number: SLI ring number to issue iocb on.
6073 * @piocb: Pointer to command iocb.
6074 * @flag: Flag indicating if this command can be put into txq.
6075 *
6076 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
6077 * an iocb command to an HBA with SLI-4 interface spec.
6078 *
6079 * This function is called with hbalock held. The function will return success
6080 * after it successfully submits the iocb to firmware or after adding it
6081 * to the txq.
6082 **/
6083static int
6084__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6085 struct lpfc_iocbq *piocb, uint32_t flag)
6086{
6087 struct lpfc_sglq *sglq;
6088 uint16_t xritag;
6089 union lpfc_wqe wqe;
6090 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
4f774513
JS
6091
6092 if (piocb->sli4_xritag == NO_XRI) {
6093 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6669f9bb 6094 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
4f774513
JS
6095 sglq = NULL;
6096 else {
6097 sglq = __lpfc_sli_get_sglq(phba);
6098 if (!sglq)
6099 return IOCB_ERROR;
6100 piocb->sli4_xritag = sglq->sli4_xritag;
6101 }
6102 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6103 sglq = NULL; /* These IO's already have an XRI and
6104 * a mapped sgl.
6105 */
6106 } else {
6107 /* This is a continuation of a command (CX), so this
6108 * sglq is on the active list
6109 */
6110 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
6111 if (!sglq)
6112 return IOCB_ERROR;
6113 }
6114
6115 if (sglq) {
6116 xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
6117 if (xritag != sglq->sli4_xritag)
6118 return IOCB_ERROR;
6119 }
6120
6121 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6122 return IOCB_ERROR;
6123
6124 if (piocb->iocb_flag & LPFC_IO_FCP) {
5ffc266e
JS
6125 /*
6126 * For FCP command IOCB, get a new WQ index to distribute
6127 * the WQE across the WQs. On the other hand, an abort IOCB
6128 * carries the same WQ index as the original command
6129 * IOCB.
6130 */
6131 if ((piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
6132 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN))
6133 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
6134 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
6135 &wqe))
4f774513
JS
6136 return IOCB_ERROR;
6137 } else {
6138 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
6139 return IOCB_ERROR;
6140 }
6141 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
6142
6143 return 0;
6144}
6145
6146/**
6147 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
6148 *
6149 * This routine wraps the actual lockless IOCB issuing routine, invoked
6150 * via the function pointer in the lpfc_hba struct.
6151 *
6152 * Return codes:
6153 * IOCB_ERROR - Error
6154 * IOCB_SUCCESS - Success
6155 * IOCB_BUSY - Busy
6156 **/
6157static inline int
6158__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6159 struct lpfc_iocbq *piocb, uint32_t flag)
6160{
6161 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6162}
6163
6164/**
6165 * lpfc_sli_api_table_setup - Set up sli api function jump table
6166 * @phba: The hba struct for which this call is being executed.
6167 * @dev_grp: The HBA PCI-Device group number.
6168 *
6169 * This routine sets up the SLI interface API function jump table in @phba
6170 * struct.
6171 * Returns: 0 - success, -ENODEV - failure.
6172 **/
6173int
6174lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6175{
6176
6177 switch (dev_grp) {
6178 case LPFC_PCI_DEV_LP:
6179 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
6180 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
6181 break;
6182 case LPFC_PCI_DEV_OC:
6183 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
6184 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
6185 break;
6186 default:
6187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6188 "1419 Invalid HBA PCI-device group: 0x%x\n",
6189 dev_grp);
6190 return -ENODEV;
6191 break;
6192 }
6193 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
6194 return 0;
6195}
6196
6197/**
6198 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
6199 * @phba: Pointer to HBA context object.
6200 * @ring_number: SLI ring number to issue iocb on.
6201 * @piocb: Pointer to command iocb.
6202 * @flag: Flag indicating if this command can be put into txq.
6203 *
6204 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb.
6205 * This function takes the hbalock, calls __lpfc_sli_issue_iocb,
6206 * and returns the error that __lpfc_sli_issue_iocb returns.
6207 * This wrapper is used by functions which do not
6208 * hold the hbalock.
6209 **/
6210int
6211lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6212 struct lpfc_iocbq *piocb, uint32_t flag)
6213{
6214 unsigned long iflags;
6215 int rc;
6216
6217 spin_lock_irqsave(&phba->hbalock, iflags);
6218 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6219 spin_unlock_irqrestore(&phba->hbalock, iflags);
6220
6221 return rc;
6222}
6223
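/*
 * Editor's illustration: callers that do not already hold hbalock use the
 * wrapper above; code running under hbalock must call the lockless
 * __lpfc_sli_issue_iocb() directly, since taking the non-recursive
 * spinlock again would deadlock.  Hypothetical caller:
 */
static int lpfc_example_issue_els(struct lpfc_hba *phba,
				  struct lpfc_iocbq *piocb)
{
	/* no hbalock held here: the wrapper takes and releases it */
	return lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
}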
6224/**
6225 * lpfc_extra_ring_setup - Extra ring setup function
6226 * @phba: Pointer to HBA context object.
6227 *
6228 * This function is called while the driver attaches to the
6229 * HBA to set up the extra ring. The extra ring is used
6230 * only when driver needs to support target mode functionality
6231 * or IP over FC functionalities.
6232 *
6233 * This function is called with no lock held.
6234 **/
6235static int
6236lpfc_extra_ring_setup( struct lpfc_hba *phba)
6237{
6238 struct lpfc_sli *psli;
6239 struct lpfc_sli_ring *pring;
6240
6241 psli = &phba->sli;
6242
6243 /* Adjust cmd/rsp ring iocb entries more evenly */
6244
6245 /* Take some away from the FCP ring */
6246 pring = &psli->ring[psli->fcp_ring];
6247 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
6248 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
cf5bf97e
JW
6249 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
6250 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
6251
a4bc3379
JS
6252 /* and give them to the extra ring */
6253 pring = &psli->ring[psli->extra_ring];
6254
cf5bf97e
JW
6255 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
6256 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
6257 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
6258 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
6259
6260 /* Setup default profile for this ring */
6261 pring->iotag_max = 4096;
6262 pring->num_mask = 1;
6263 pring->prt[0].profile = 0; /* Mask 0 */
a4bc3379
JS
6264 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
6265 pring->prt[0].type = phba->cfg_multi_ring_type;
cf5bf97e
JW
6266 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
6267 return 0;
6268}
6269
e59058c4 6270/**
3621a710 6271 * lpfc_sli_async_event_handler - ASYNC iocb handler function
e59058c4
JS
6272 * @phba: Pointer to HBA context object.
6273 * @pring: Pointer to driver SLI ring object.
6274 * @iocbq: Pointer to iocb object.
6275 *
6276 * This function is called by the slow ring event handler
6277 * function when there is an ASYNC event iocb in the ring.
6278 * This function is called with no lock held.
6279 * Currently this function handles only temperature related
6280 * ASYNC events. The function decodes the temperature sensor
6281 * event message and posts events for the management applications.
6282 **/
98c9ea5c 6283static void
57127f15
JS
6284lpfc_sli_async_event_handler(struct lpfc_hba * phba,
6285 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
6286{
6287 IOCB_t *icmd;
6288 uint16_t evt_code;
6289 uint16_t temp;
6290 struct temp_event temp_event_data;
6291 struct Scsi_Host *shost;
a257bf90 6292 uint32_t *iocb_w;
57127f15
JS
6293
6294 icmd = &iocbq->iocb;
6295 evt_code = icmd->un.asyncstat.evt_code;
6296 temp = icmd->ulpContext;
6297
6298 if ((evt_code != ASYNC_TEMP_WARN) &&
6299 (evt_code != ASYNC_TEMP_SAFE)) {
a257bf90 6300 iocb_w = (uint32_t *) icmd;
57127f15
JS
6301 lpfc_printf_log(phba,
6302 KERN_ERR,
6303 LOG_SLI,
6304 "0346 Ring %d handler: unexpected ASYNC_STATUS"
6305 " evt_code 0x%x\n"
6306 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
6307 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
6308 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
6309 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
6310 pring->ringno,
6311 icmd->un.asyncstat.evt_code,
6312 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
6313 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
6314 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
6315 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
6316
6317 return;
6318 }
6319 temp_event_data.data = (uint32_t)temp;
6320 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6321 if (evt_code == ASYNC_TEMP_WARN) {
6322 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6323 lpfc_printf_log(phba,
6324 KERN_ERR,
6325 LOG_TEMP,
6326 "0347 Adapter is very hot, please take "
6327 "corrective action. temperature : %d Celsius\n",
6328 temp);
6329 }
6330 if (evt_code == ASYNC_TEMP_SAFE) {
6331 temp_event_data.event_code = LPFC_NORMAL_TEMP;
6332 lpfc_printf_log(phba,
6333 KERN_ERR,
6334 LOG_TEMP,
6335 "0340 Adapter temperature is OK now. "
6336 "temperature : %d Celsius\n",
6337 temp);
6338 }
6339
6340 /* Send temperature change event to applications */
6341 shost = lpfc_shost_from_vport(phba->pport);
6342 fc_host_post_vendor_event(shost, fc_get_event_number(),
6343 sizeof(temp_event_data), (char *) &temp_event_data,
6344 LPFC_NL_VENDOR_ID);
6345
6346}
6347
6348
6349/**
6350 * lpfc_sli_setup - SLI ring setup function
6351 * @phba: Pointer to HBA context object.
6352 *
6353 * lpfc_sli_setup sets up rings of the SLI interface with the
6354 * number of iocbs per ring and iotags. This function is
6355 * called while the driver attaches to the HBA and before the
6356 * interrupts are enabled, so there is no need for locking.
6357 *
6358 * This function always returns 0.
6359 **/
6360int
6361lpfc_sli_setup(struct lpfc_hba *phba)
6362{
6363 int i, totiocbsize = 0;
6364 struct lpfc_sli *psli = &phba->sli;
6365 struct lpfc_sli_ring *pring;
6366
6367 psli->num_rings = MAX_CONFIGURED_RINGS;
6368 psli->sli_flag = 0;
6369 psli->fcp_ring = LPFC_FCP_RING;
6370 psli->next_ring = LPFC_FCP_NEXT_RING;
6371 psli->extra_ring = LPFC_EXTRA_RING;
6372
6373 psli->iocbq_lookup = NULL;
6374 psli->iocbq_lookup_len = 0;
6375 psli->last_iotag = 0;
6376
6377 for (i = 0; i < psli->num_rings; i++) {
6378 pring = &psli->ring[i];
6379 switch (i) {
6380 case LPFC_FCP_RING: /* ring 0 - FCP */
6381 /* numCiocb and numRiocb are used in config_port */
6382 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
6383 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
6384 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
6385 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
6386 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
6387 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
6388 pring->sizeCiocb = (phba->sli_rev == 3) ?
6389 SLI3_IOCB_CMD_SIZE :
6390 SLI2_IOCB_CMD_SIZE;
6391 pring->sizeRiocb = (phba->sli_rev == 3) ?
6392 SLI3_IOCB_RSP_SIZE :
6393 SLI2_IOCB_RSP_SIZE;
6394 pring->iotag_ctr = 0;
6395 pring->iotag_max =
6396 (phba->cfg_hba_queue_depth * 2);
6397 pring->fast_iotag = pring->iotag_max;
6398 pring->num_mask = 0;
6399 break;
6400 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
6401 /* numCiocb and numRiocb are used in config_port */
6402 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
6403 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
6404 pring->sizeCiocb = (phba->sli_rev == 3) ?
6405 SLI3_IOCB_CMD_SIZE :
6406 SLI2_IOCB_CMD_SIZE;
6407 pring->sizeRiocb = (phba->sli_rev == 3) ?
6408 SLI3_IOCB_RSP_SIZE :
6409 SLI2_IOCB_RSP_SIZE;
6410 pring->iotag_max = phba->cfg_hba_queue_depth;
6411 pring->num_mask = 0;
6412 break;
6413 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
6414 /* numCiocb and numRiocb are used in config_port */
6415 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
6416 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
6417 pring->sizeCiocb = (phba->sli_rev == 3) ?
6418 SLI3_IOCB_CMD_SIZE :
6419 SLI2_IOCB_CMD_SIZE;
6420 pring->sizeRiocb = (phba->sli_rev == 3) ?
6421 SLI3_IOCB_RSP_SIZE :
6422 SLI2_IOCB_RSP_SIZE;
6423 pring->fast_iotag = 0;
6424 pring->iotag_ctr = 0;
6425 pring->iotag_max = 4096;
6426 pring->lpfc_sli_rcv_async_status =
6427 lpfc_sli_async_event_handler;
6428 pring->num_mask = LPFC_MAX_RING_MASK;
6429 pring->prt[0].profile = 0; /* Mask 0 */
6430 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
6431 pring->prt[0].type = FC_TYPE_ELS;
6432 pring->prt[0].lpfc_sli_rcv_unsol_event =
6433 lpfc_els_unsol_event;
6434 pring->prt[1].profile = 0; /* Mask 1 */
6435 pring->prt[1].rctl = FC_RCTL_ELS_REP;
6436 pring->prt[1].type = FC_TYPE_ELS;
6437 pring->prt[1].lpfc_sli_rcv_unsol_event =
6438 lpfc_els_unsol_event;
6439 pring->prt[2].profile = 0; /* Mask 2 */
6440 /* NameServer Inquiry */
6441 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
6442 /* NameServer */
6443 pring->prt[2].type = FC_TYPE_CT;
6444 pring->prt[2].lpfc_sli_rcv_unsol_event =
6445 lpfc_ct_unsol_event;
6446 pring->prt[3].profile = 0; /* Mask 3 */
6447 /* NameServer response */
6448 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
6449 /* NameServer */
6450 pring->prt[3].type = FC_TYPE_CT;
6451 pring->prt[3].lpfc_sli_rcv_unsol_event =
6452 lpfc_ct_unsol_event;
6453 /* abort unsolicited sequence */
6454 pring->prt[4].profile = 0; /* Mask 4 */
6455 pring->prt[4].rctl = FC_RCTL_BA_ABTS;
6456 pring->prt[4].type = FC_TYPE_BLS;
6457 pring->prt[4].lpfc_sli_rcv_unsol_event =
6458 lpfc_sli4_ct_abort_unsol_event;
6459 break;
6460 }
6461 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
6462 (pring->numRiocb * pring->sizeRiocb);
6463 }
6464 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
6465 /* Too many cmd / rsp ring entries in SLI2 SLIM */
6466 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
6467 "SLI2 SLIM Data: x%x x%lx\n",
6468 phba->brd_no, totiocbsize,
6469 (unsigned long) MAX_SLIM_IOCB_SIZE);
6470 }
6471 if (phba->cfg_multi_ring_support == 2)
6472 lpfc_extra_ring_setup(phba);
6473
6474 return 0;
6475}
6476
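/*
 * Illustrative sketch (not part of the driver): how the prt[] masks built
 * above are conceptually consumed when an unsolicited frame arrives. This
 * is a simplified, hypothetical rendering of the ring event handler's
 * dispatch; "frame_rctl", "frame_type" and "saveq" are assumed inputs.
 *
 *	for (i = 0; i < pring->num_mask; i++) {
 *		if (pring->prt[i].rctl == frame_rctl &&
 *		    pring->prt[i].type == frame_type &&
 *		    pring->prt[i].lpfc_sli_rcv_unsol_event) {
 *			pring->prt[i].lpfc_sli_rcv_unsol_event(phba, pring,
 *							       saveq);
 *			break;
 *		}
 *	}
 */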
6477/**
6478 * lpfc_sli_queue_setup - Queue initialization function
6479 * @phba: Pointer to HBA context object.
6480 *
6481 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
6482 * ring. This function also initializes ring indices of each ring.
6483 * This function is called during the initialization of the SLI
6484 * interface of an HBA.
6485 * This function is called with no lock held and always returns
6486 * 1.
6487 **/
6488int
6489lpfc_sli_queue_setup(struct lpfc_hba *phba)
6490{
6491 struct lpfc_sli *psli;
6492 struct lpfc_sli_ring *pring;
6493 int i;
6494
6495 psli = &phba->sli;
6496 spin_lock_irq(&phba->hbalock);
6497 INIT_LIST_HEAD(&psli->mboxq);
6498 INIT_LIST_HEAD(&psli->mboxq_cmpl);
6499 /* Initialize list headers for txq and txcmplq as double linked lists */
6500 for (i = 0; i < psli->num_rings; i++) {
6501 pring = &psli->ring[i];
6502 pring->ringno = i;
6503 pring->next_cmdidx = 0;
6504 pring->local_getidx = 0;
6505 pring->cmdidx = 0;
6506 INIT_LIST_HEAD(&pring->txq);
6507 INIT_LIST_HEAD(&pring->txcmplq);
6508 INIT_LIST_HEAD(&pring->iocb_continueq);
6509 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
6510 INIT_LIST_HEAD(&pring->postbufq);
6511 }
6512 spin_unlock_irq(&phba->hbalock);
6513 return 1;
6514}
6515
6516/**
6517 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
6518 * @phba: Pointer to HBA context object.
6519 *
6520 * This routine flushes the mailbox command subsystem. It will unconditionally
6521 * flush all the mailbox commands in the three possible stages in the mailbox
6522 * command sub-system: pending mailbox command queue; the outstanding mailbox
6523 * command; and completed mailbox command queue. It is caller's responsibility
6524 * to make sure that the driver is in the proper state to flush the mailbox
6525 * command sub-system. Namely, the posting of mailbox commands into the
6526 * pending mailbox command queue from the various clients must be stopped;
6527 * either the HBA is in a state in which it will never work on the outstanding
6528 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
6529 * mailbox command has been completed.
6530 **/
6531static void
6532lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
6533{
6534 LIST_HEAD(completions);
6535 struct lpfc_sli *psli = &phba->sli;
6536 LPFC_MBOXQ_t *pmb;
6537 unsigned long iflag;
6538
6539 /* Flush all the mailbox commands in the mbox system */
6540 spin_lock_irqsave(&phba->hbalock, iflag);
6541 /* The pending mailbox command queue */
6542 list_splice_init(&phba->sli.mboxq, &completions);
6543 /* The outstanding active mailbox command */
6544 if (psli->mbox_active) {
6545 list_add_tail(&psli->mbox_active->list, &completions);
6546 psli->mbox_active = NULL;
6547 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6548 }
6549 /* The completed mailbox command queue */
6550 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
6551 spin_unlock_irqrestore(&phba->hbalock, iflag);
6552
6553 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
6554 while (!list_empty(&completions)) {
6555 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
6556 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
6557 if (pmb->mbox_cmpl)
6558 pmb->mbox_cmpl(phba, pmb);
6559 }
6560}
6561
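/*
 * Design note with a minimal sketch (not driver code): the flush above uses
 * the splice-then-complete idiom so entries are detached from the live
 * queues while hbalock is held, but the completion handlers run unlocked
 * and may themselves re-take hbalock without deadlocking.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	list_splice_init(&phba->sli.mboxq, &completions);
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 *	// complete everything on "completions" outside the lock
 */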
6562/**
6563 * lpfc_sli_host_down - Vport cleanup function
6564 * @vport: Pointer to virtual port object.
6565 *
6566 * lpfc_sli_host_down is called to clean up the resources
6567 * associated with a vport before destroying virtual
6568 * port data structures.
6569 * This function does the following operations:
6570 * - Free discovery resources associated with this virtual
6571 * port.
6572 * - Free iocbs associated with this virtual port in
6573 * the txq.
6574 * - Send abort for all iocb commands associated with this
6575 * vport in txcmplq.
6576 *
6577 * This function is called with no lock held and always returns 1.
6578 **/
6579int
6580lpfc_sli_host_down(struct lpfc_vport *vport)
6581{
6582 LIST_HEAD(completions);
6583 struct lpfc_hba *phba = vport->phba;
6584 struct lpfc_sli *psli = &phba->sli;
6585 struct lpfc_sli_ring *pring;
6586 struct lpfc_iocbq *iocb, *next_iocb;
6587 int i;
6588 unsigned long flags = 0;
6589 uint16_t prev_pring_flag;
6590
6591 lpfc_cleanup_discovery_resources(vport);
6592
6593 spin_lock_irqsave(&phba->hbalock, flags);
6594 for (i = 0; i < psli->num_rings; i++) {
6595 pring = &psli->ring[i];
6596 prev_pring_flag = pring->flag;
6597 /* Only slow rings */
6598 if (pring->ringno == LPFC_ELS_RING) {
6599 pring->flag |= LPFC_DEFERRED_RING_EVENT;
6600 /* Set the lpfc data pending flag */
6601 set_bit(LPFC_DATA_READY, &phba->data_flags);
6602 }
6603 /*
6604 * Error everything on the txq since these iocbs have not been
6605 * given to the FW yet.
6606 */
6607 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
6608 if (iocb->vport != vport)
6609 continue;
6610 list_move_tail(&iocb->list, &completions);
6611 pring->txq_cnt--;
6612 }
6613
6614 /* Next issue ABTS for everything on the txcmplq */
6615 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
6616 list) {
6617 if (iocb->vport != vport)
6618 continue;
6619 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
6620 }
6621
6622 pring->flag = prev_pring_flag;
6623 }
6624
6625 spin_unlock_irqrestore(&phba->hbalock, flags);
6626
6627 /* Cancel all the IOCBs from the completions list */
6628 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6629 IOERR_SLI_DOWN);
6630 return 1;
6631}
6632
6633/**
6634 * lpfc_sli_hba_down - Resource cleanup function for the HBA
6635 * @phba: Pointer to HBA context object.
6636 *
6637 * This function cleans up all iocbs, buffers, and mailbox commands
6638 * while shutting down the HBA. This function is called with no
6639 * lock held and always returns 1.
6640 * This function does the following to cleanup driver resources:
6641 * - Free discovery resources for each virtual port
6642 * - Cleanup any pending fabric iocbs
6643 * - Iterate through the iocb txq and free each entry
6644 * in the list.
6645 * - Free up any buffer posted to the HBA
6646 * - Free mailbox commands in the mailbox queue.
6647 **/
6648int
6649lpfc_sli_hba_down(struct lpfc_hba *phba)
6650{
6651 LIST_HEAD(completions);
6652 struct lpfc_sli *psli = &phba->sli;
6653 struct lpfc_sli_ring *pring;
6654 struct lpfc_dmabuf *buf_ptr;
6655 unsigned long flags = 0;
6656 int i;
6657
6658 /* Shutdown the mailbox command sub-system */
6659 lpfc_sli_mbox_sys_shutdown(phba);
6660
6661 lpfc_hba_down_prep(phba);
6662
6663 lpfc_fabric_abort_hba(phba);
6664
6665 spin_lock_irqsave(&phba->hbalock, flags);
6666 for (i = 0; i < psli->num_rings; i++) {
6667 pring = &psli->ring[i];
6668 /* Only slow rings */
6669 if (pring->ringno == LPFC_ELS_RING) {
6670 pring->flag |= LPFC_DEFERRED_RING_EVENT;
6671 /* Set the lpfc data pending flag */
6672 set_bit(LPFC_DATA_READY, &phba->data_flags);
6673 }
6674
6675 /*
6676 * Error everything on the txq since these iocbs have not been
6677 * given to the FW yet.
6678 */
6679 list_splice_init(&pring->txq, &completions);
6680 pring->txq_cnt = 0;
6681
6682 }
6683 spin_unlock_irqrestore(&phba->hbalock, flags);
6684
6685 /* Cancel all the IOCBs from the completions list */
6686 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6687 IOERR_SLI_DOWN);
6688
6689 spin_lock_irqsave(&phba->hbalock, flags);
6690 list_splice_init(&phba->elsbuf, &completions);
6691 phba->elsbuf_cnt = 0;
6692 phba->elsbuf_prev_cnt = 0;
6693 spin_unlock_irqrestore(&phba->hbalock, flags);
6694
6695 while (!list_empty(&completions)) {
6696 list_remove_head(&completions, buf_ptr,
6697 struct lpfc_dmabuf, list);
6698 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
6699 kfree(buf_ptr);
6700 }
6701
6702 /* Return any active mbox cmds */
6703 del_timer_sync(&psli->mbox_tmo);
6704
6705 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
6706 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6707 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
6708
6709 return 1;
6710}
6711
6712/**
6713 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
6714 * @phba: Pointer to HBA context object.
6715 *
6716 * This function cleans up all queues, iocbs, buffers, and mailbox commands while
6717 * shutting down the SLI4 HBA FCoE function. This function is called with no
6718 * lock held and always returns 1.
6719 *
6720 * This function does the following to cleanup driver FCoE function resources:
6721 * - Free discovery resources for each virtual port
6722 * - Cleanup any pending fabric iocbs
6723 * - Iterate through the iocb txq and free each entry in the list.
6724 * - Free up any buffer posted to the HBA.
6725 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
6726 * - Free mailbox commands in the mailbox queue.
6727 **/
6728int
6729lpfc_sli4_hba_down(struct lpfc_hba *phba)
6730{
6731 /* Stop the SLI4 device port */
6732 lpfc_stop_port(phba);
6733
6734 /* Tear down the queues in the HBA */
6735 lpfc_sli4_queue_unset(phba);
6736
6737 /* unregister default FCFI from the HBA */
6738 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
6739
6740 return 1;
6741}
6742
6743/**
6744 * lpfc_sli_pcimem_bcopy - SLI memory copy function
6745 * @srcp: Source memory pointer.
6746 * @destp: Destination memory pointer.
6747 * @cnt: Number of bytes to copy, walked one 32-bit word at a time.
6748 *
6749 * This function is used for copying data between driver memory
6750 * and the SLI memory. This function also changes the endianness
6751 * of each word if native endianness is different from SLI
6752 * endianness. This function can be called with or without
6753 * lock.
6754 **/
6755void
6756lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
6757{
6758 uint32_t *src = srcp;
6759 uint32_t *dest = destp;
6760 uint32_t ldata;
6761 int i;
6762
6763 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
6764 ldata = *src;
6765 ldata = le32_to_cpu(ldata);
6766 *dest = ldata;
6767 src++;
6768 dest++;
6769 }
6770}
6771
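/*
 * Illustrative sketch (not part of the driver): copying a SLIM region with
 * lpfc_sli_pcimem_bcopy. Note that @cnt is a byte count walked one 32-bit
 * word at a time, with each word le32-swapped on big-endian hosts.
 * "slim_src" and the 32-word size are hypothetical.
 *
 *	uint32_t shadow[32];
 *
 *	lpfc_sli_pcimem_bcopy(slim_src, shadow, sizeof(shadow));
 */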
6772
6773/**
6774 * lpfc_sli_bemem_bcopy - SLI memory copy function
6775 * @srcp: Source memory pointer.
6776 * @destp: Destination memory pointer.
6777 * @cnt: Number of bytes to copy, walked one 32-bit word at a time.
6778 *
6779 * This function is used for copying data from a data structure
6780 * with big endian representation to local endianness.
6781 * This function can be called with or without lock.
6782 **/
6783void
6784lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
6785{
6786 uint32_t *src = srcp;
6787 uint32_t *dest = destp;
6788 uint32_t ldata;
6789 int i;
6790
6791 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
6792 ldata = *src;
6793 ldata = be32_to_cpu(ldata);
6794 *dest = ldata;
6795 src++;
6796 dest++;
6797 }
6798}
6799
6800/**
6801 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
6802 * @phba: Pointer to HBA context object.
6803 * @pring: Pointer to driver SLI ring object.
6804 * @mp: Pointer to driver buffer object.
6805 *
6806 * This function is called with no lock held.
6807 * It always returns zero after adding the buffer to the postbufq
6808 * buffer list.
6809 **/
6810int
6811lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6812 struct lpfc_dmabuf *mp)
6813{
6814 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
6815 later */
6816 spin_lock_irq(&phba->hbalock);
6817 list_add_tail(&mp->list, &pring->postbufq);
6818 pring->postbufq_cnt++;
6819 spin_unlock_irq(&phba->hbalock);
6820 return 0;
6821}
6822
6823/**
6824 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
6825 * @phba: Pointer to HBA context object.
6826 *
6827 * When HBQ is enabled, buffers are searched based on tags. This function
6828 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
6829 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
6830 * does not conflict with tags of buffer posted for unsolicited events.
6831 * The function returns the allocated tag. The function is called with
6832 * no locks held.
6833 **/
6834uint32_t
6835lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
6836{
6837 spin_lock_irq(&phba->hbalock);
6838 phba->buffer_tag_count++;
6839 /*
6840 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
6841 * a tag assigned by HBQ.
6842 */
6843 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
6844 spin_unlock_irq(&phba->hbalock);
6845 return phba->buffer_tag_count;
6846}
6847
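/*
 * Illustrative sketch (not part of the driver): tagging a buffer for a
 * CMD_QUE_XRI64_CX posting and recovering it on completion. "mp" and
 * "tag_from_iocb" are hypothetical; the helpers are the ones in this file.
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_iocb);
 */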
6848/**
6849 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
6850 * @phba: Pointer to HBA context object.
6851 * @pring: Pointer to driver SLI ring object.
6852 * @tag: Buffer tag.
6853 *
6854 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
6855 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
6856 * iocb is posted to the response ring with the tag of the buffer.
6857 * This function searches the pring->postbufq list using the tag
6858 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
6859 * iocb. If the buffer is found then the lpfc_dmabuf object of the
6860 * buffer is returned to the caller; otherwise NULL is returned.
6861 * This function is called with no lock held.
6862 **/
6863struct lpfc_dmabuf *
6864lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6865 uint32_t tag)
6866{
6867 struct lpfc_dmabuf *mp, *next_mp;
6868 struct list_head *slp = &pring->postbufq;
6869
6870 /* Search postbufq, from the beginning, looking for a match on tag */
6871 spin_lock_irq(&phba->hbalock);
6872 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
6873 if (mp->buffer_tag == tag) {
6874 list_del_init(&mp->list);
6875 pring->postbufq_cnt--;
6876 spin_unlock_irq(&phba->hbalock);
6877 return mp;
6878 }
6879 }
6880
6881 spin_unlock_irq(&phba->hbalock);
6882 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6883 "0402 Cannot find virtual addr for buffer tag on "
6884 "ring %d Data x%lx x%p x%p x%x\n",
6885 pring->ringno, (unsigned long) tag,
6886 slp->next, slp->prev, pring->postbufq_cnt);
6887
6888 return NULL;
6889}
6890
6891/**
6892 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
6893 * @phba: Pointer to HBA context object.
6894 * @pring: Pointer to driver SLI ring object.
6895 * @phys: DMA address of the buffer.
6896 *
6897 * This function searches the buffer list using the dma_address
6898 * of unsolicited event to find the driver's lpfc_dmabuf object
6899 * corresponding to the dma_address. The function returns the
6900 * lpfc_dmabuf object if a buffer is found else it returns NULL.
6901 * This function is called by the ct and els unsolicited event
6902 * handlers to get the buffer associated with the unsolicited
6903 * event.
6904 *
6905 * This function is called with no lock held.
6906 **/
6907struct lpfc_dmabuf *
6908lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6909 dma_addr_t phys)
6910{
6911 struct lpfc_dmabuf *mp, *next_mp;
6912 struct list_head *slp = &pring->postbufq;
6913
6914 /* Search postbufq, from the beginning, looking for a match on phys */
6915 spin_lock_irq(&phba->hbalock);
6916 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
6917 if (mp->phys == phys) {
6918 list_del_init(&mp->list);
6919 pring->postbufq_cnt--;
6920 spin_unlock_irq(&phba->hbalock);
6921 return mp;
6922 }
6923 }
6924
6925 spin_unlock_irq(&phba->hbalock);
6926 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6927 "0410 Cannot find virtual addr for mapped buf on "
6928 "ring %d Data x%llx x%p x%p x%x\n",
6929 pring->ringno, (unsigned long long)phys,
6930 slp->next, slp->prev, pring->postbufq_cnt);
6931 return NULL;
6932}
6933
6934/**
6935 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
6936 * @phba: Pointer to HBA context object.
6937 * @cmdiocb: Pointer to driver command iocb object.
6938 * @rspiocb: Pointer to driver response iocb object.
6939 *
6940 * This function is the completion handler for the abort iocbs for
6941 * ELS commands. This function is called from the ELS ring event
6942 * handler with no lock held. This function frees memory resources
6943 * associated with the abort iocb.
6944 **/
6945static void
6946lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6947 struct lpfc_iocbq *rspiocb)
6948{
6949 IOCB_t *irsp = &rspiocb->iocb;
6950 uint16_t abort_iotag, abort_context;
6951 struct lpfc_iocbq *abort_iocb;
6952 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6953
6954 abort_iocb = NULL;
6955
6956 if (irsp->ulpStatus) {
6957 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
6958 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
6959
6960 spin_lock_irq(&phba->hbalock);
6961 if (phba->sli_rev < LPFC_SLI_REV4) {
6962 if (abort_iotag != 0 &&
6963 abort_iotag <= phba->sli.last_iotag)
6964 abort_iocb =
6965 phba->sli.iocbq_lookup[abort_iotag];
6966 } else
6967 /* For sli4 the abort_tag is the XRI,
6968 * so the abort routine puts the iotag of the iocb
6969 * being aborted in the context field of the abort
6970 * IOCB.
6971 */
6972 abort_iocb = phba->sli.iocbq_lookup[abort_context];
6973
6974 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
6975 "0327 Cannot abort els iocb %p "
6976 "with tag %x context %x, abort status %x, "
6977 "abort code %x\n",
6978 abort_iocb, abort_iotag, abort_context,
6979 irsp->ulpStatus, irsp->un.ulpWord[4]);
6980
6981 /*
6982 * If the iocb is not found in the firmware queue, the iocb
6983 * might have completed already. Do not free it again.
6984 */
6985 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
6986 if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
6987 spin_unlock_irq(&phba->hbalock);
6988 lpfc_sli_release_iocbq(phba, cmdiocb);
6989 return;
6990 }
6991 /* For SLI4 the ulpContext field for abort IOCB
6992 * holds the iotag of the IOCB being aborted so
6993 * the local abort_context needs to be reset to
6994 * match the aborted IOCBs ulpContext.
6995 */
6996 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
6997 abort_context = abort_iocb->iocb.ulpContext;
6998 }
6999 /*
7000 * make sure we have the right iocbq before taking it
7001 * off the txcmplq and try to call completion routine.
7002 */
7003 if (!abort_iocb ||
7004 abort_iocb->iocb.ulpContext != abort_context ||
7005 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
7006 spin_unlock_irq(&phba->hbalock);
7007 else {
7008 list_del_init(&abort_iocb->list);
7009 pring->txcmplq_cnt--;
7010 spin_unlock_irq(&phba->hbalock);
7011
7012 /* Firmware could still be in progress of DMAing
7013 * payload, so don't free data buffer till after
7014 * a hbeat.
7015 */
7016 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
7017
7018 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
7019 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
7020 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
7021 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
7022 }
7023 }
7024
7025 lpfc_sli_release_iocbq(phba, cmdiocb);
7026 return;
7027}
7028
7029/**
7030 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
7031 * @phba: Pointer to HBA context object.
7032 * @cmdiocb: Pointer to driver command iocb object.
7033 * @rspiocb: Pointer to driver response iocb object.
7034 *
7035 * The function is called from SLI ring event handler with no
7036 * lock held. This function is the completion handler for ELS commands
7037 * which are aborted. The function frees memory resources used for
7038 * the aborted ELS commands.
7039 **/
7040static void
7041lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7042 struct lpfc_iocbq *rspiocb)
7043{
7044 IOCB_t *irsp = &rspiocb->iocb;
7045
7046 /* ELS cmd tag <ulpIoTag> completes */
7047 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
7048 "0139 Ignoring ELS cmd tag x%x completion Data: "
7049 "x%x x%x x%x\n",
7050 irsp->ulpIoTag, irsp->ulpStatus,
7051 irsp->un.ulpWord[4], irsp->ulpTimeout);
7052 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
7053 lpfc_ct_free_iocb(phba, cmdiocb);
7054 else
7055 lpfc_els_free_iocb(phba, cmdiocb);
7056 return;
7057}
7058
7059/**
7060 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
7061 * @phba: Pointer to HBA context object.
7062 * @pring: Pointer to driver SLI ring object.
7063 * @cmdiocb: Pointer to driver command iocb object.
7064 *
7065 * This function issues an abort iocb for the provided command
7066 * iocb. This function is called with hbalock held.
7067 * The function returns 0 when it fails due to memory allocation
7068 * failure or when the command iocb is an abort request.
7069 **/
7070int
7071lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7072 struct lpfc_iocbq *cmdiocb)
7073{
7074 struct lpfc_vport *vport = cmdiocb->vport;
7075 struct lpfc_iocbq *abtsiocbp;
7076 IOCB_t *icmd = NULL;
7077 IOCB_t *iabt = NULL;
7078 int retval = IOCB_ERROR;
7079
7080 /*
7081 * There are certain command types we don't want to abort. And we
7082 * don't want to abort commands that are already in the process of
7083 * being aborted.
7084 */
7085 icmd = &cmdiocb->iocb;
7086 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
7087 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
7088 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
7089 return 0;
7090
7091 /* If we're unloading, don't abort iocb on the ELS ring, but change the
7092 * callback so that nothing happens when it finishes.
7093 */
7094 if ((vport->load_flag & FC_UNLOADING) &&
7095 (pring->ringno == LPFC_ELS_RING)) {
7096 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
7097 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
7098 else
7099 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
7100 goto abort_iotag_exit;
7101 }
7102
7103 /* issue ABTS for this IOCB based on iotag */
7104 abtsiocbp = __lpfc_sli_get_iocbq(phba);
7105 if (abtsiocbp == NULL)
7106 return 0;
7107
7108 /* This signals the response to set the correct status
7109 * before calling the completion handler.
7110 */
7111 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
7112
7113 iabt = &abtsiocbp->iocb;
7114 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
7115 iabt->un.acxri.abortContextTag = icmd->ulpContext;
7116 if (phba->sli_rev == LPFC_SLI_REV4) {
7117 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
7118 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
7119 }
7120 else
7121 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
7122 iabt->ulpLe = 1;
7123 iabt->ulpClass = icmd->ulpClass;
7124
7125 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7126 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
7127
7128 if (phba->link_state >= LPFC_LINK_UP)
7129 iabt->ulpCommand = CMD_ABORT_XRI_CN;
7130 else
7131 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
7132
7133 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
7134
7135 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
7136 "0339 Abort xri x%x, original iotag x%x, "
7137 "abort cmd iotag x%x\n",
7138 iabt->un.acxri.abortContextTag,
7139 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
7140 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
7141
7142 if (retval)
7143 __lpfc_sli_release_iocbq(phba, abtsiocbp);
7144abort_iotag_exit:
7145 /*
7146 * Caller to this routine should check for IOCB_ERROR
7147 * and handle it properly. This routine no longer removes
7148 * iocb off txcmplq and call compl in case of IOCB_ERROR.
7149 */
7150 return retval;
7151}
7152
7153/**
7154 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
7155 * @iocbq: Pointer to driver iocb object.
7156 * @vport: Pointer to driver virtual port object.
7157 * @tgt_id: SCSI ID of the target.
7158 * @lun_id: LUN ID of the scsi device.
7159 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
7160 *
7161 * This function acts as an iocb filter for functions which abort or count
7162 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
7163 * 0 if the filtering criteria are met for the given iocb and will return
7164 * 1 if the filtering criteria are not met.
7165 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
7166 * given iocb is for the SCSI device specified by vport, tgt_id and
7167 * lun_id parameter.
7168 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
7169 * given iocb is for the SCSI target specified by vport and tgt_id
7170 * parameters.
7171 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
7172 * given iocb is for the SCSI host associated with the given vport.
7173 * This function is called with no locks held.
7174 **/
7175static int
7176lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
7177 uint16_t tgt_id, uint64_t lun_id,
7178 lpfc_ctx_cmd ctx_cmd)
7179{
7180 struct lpfc_scsi_buf *lpfc_cmd;
7181 int rc = 1;
7182
7183 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
7184 return rc;
7185
7186 if (iocbq->vport != vport)
7187 return rc;
7188
7189 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
7190
7191 if (lpfc_cmd->pCmd == NULL)
7192 return rc;
7193
7194 switch (ctx_cmd) {
7195 case LPFC_CTX_LUN:
7196 if ((lpfc_cmd->rdata->pnode) &&
7197 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
7198 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
7199 rc = 0;
7200 break;
7201 case LPFC_CTX_TGT:
7202 if ((lpfc_cmd->rdata->pnode) &&
7203 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
7204 rc = 0;
7205 break;
7206 case LPFC_CTX_HOST:
7207 rc = 0;
7208 break;
7209 default:
7210 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
7211 __func__, ctx_cmd);
7212 break;
7213 }
7214
7215 return rc;
7216}
7217
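/*
 * Illustrative sketch (not part of the driver): the filter above is meant
 * to be driven over the iotag lookup table, as lpfc_sli_sum_iocb and
 * lpfc_sli_abort_iocb do below. "act_on" is a hypothetical callback.
 *
 *	for (i = 1; i <= phba->sli.last_iotag; i++) {
 *		iocbq = phba->sli.iocbq_lookup[i];
 *		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id,
 *					       lun_id, LPFC_CTX_TGT) == 0)
 *			act_on(iocbq);
 *	}
 */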
7218/**
7219 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
7220 * @vport: Pointer to virtual port.
7221 * @tgt_id: SCSI ID of the target.
7222 * @lun_id: LUN ID of the scsi device.
7223 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
7224 *
7225 * This function returns number of FCP commands pending for the vport.
7226 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
7227 * commands pending on the vport associated with SCSI device specified
7228 * by tgt_id and lun_id parameters.
7229 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
7230 * commands pending on the vport associated with SCSI target specified
7231 * by tgt_id parameter.
7232 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
7233 * commands pending on the vport.
7234 * This function returns the number of iocbs which satisfy the filter.
7235 * This function is called without any lock held.
7236 **/
7237int
7238lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
7239 lpfc_ctx_cmd ctx_cmd)
7240{
7241 struct lpfc_hba *phba = vport->phba;
7242 struct lpfc_iocbq *iocbq;
7243 int sum, i;
7244
7245 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
7246 iocbq = phba->sli.iocbq_lookup[i];
7247
7248 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
7249 ctx_cmd) == 0)
7250 sum++;
7251 }
7252
7253 return sum;
7254}
7255
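/*
 * Illustrative sketch (not part of the driver): a typical error-handler
 * style use of lpfc_sli_sum_iocb, polling until outstanding FCP commands
 * for a target drain. Timeout handling is deliberately elided; lun_id is
 * ignored for LPFC_CTX_TGT.
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT))
 *		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 */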
7256/**
7257 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
7258 * @phba: Pointer to HBA context object
7259 * @cmdiocb: Pointer to command iocb object.
7260 * @rspiocb: Pointer to response iocb object.
7261 *
7262 * This function is called when an aborted FCP iocb completes. This
7263 * function is called by the ring event handler with no lock held.
7264 * This function frees the iocb.
7265 **/
7266void
7267lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7268 struct lpfc_iocbq *rspiocb)
7269{
7270 lpfc_sli_release_iocbq(phba, cmdiocb);
7271 return;
7272}
7273
7274/**
7275 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
7276 * @vport: Pointer to virtual port.
7277 * @pring: Pointer to driver SLI ring object.
7278 * @tgt_id: SCSI ID of the target.
7279 * @lun_id: LUN ID of the scsi device.
7280 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
7281 *
7282 * This function sends an abort command for every SCSI command
7283 * associated with the given virtual port pending on the ring
7284 * filtered by lpfc_sli_validate_fcp_iocb function.
7285 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
7286 * FCP iocbs associated with lun specified by tgt_id and lun_id
7287 * parameters
7288 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
7289 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
7290 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
7291 * FCP iocbs associated with virtual port.
7292 * This function returns number of iocbs it failed to abort.
7293 * This function is called with no locks held.
7294 **/
7295int
7296lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
7297 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
7298{
7299 struct lpfc_hba *phba = vport->phba;
7300 struct lpfc_iocbq *iocbq;
7301 struct lpfc_iocbq *abtsiocb;
7302 IOCB_t *cmd = NULL;
7303 int errcnt = 0, ret_val = 0;
7304 int i;
7305
7306 for (i = 1; i <= phba->sli.last_iotag; i++) {
7307 iocbq = phba->sli.iocbq_lookup[i];
7308
7309 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
7310 abort_cmd) != 0)
7311 continue;
7312
7313 /* issue ABTS for this IOCB based on iotag */
7314 abtsiocb = lpfc_sli_get_iocbq(phba);
7315 if (abtsiocb == NULL) {
7316 errcnt++;
7317 continue;
7318 }
7319
7320 cmd = &iocbq->iocb;
7321 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
7322 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
7323 if (phba->sli_rev == LPFC_SLI_REV4)
7324 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
7325 else
7326 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
7327 abtsiocb->iocb.ulpLe = 1;
7328 abtsiocb->iocb.ulpClass = cmd->ulpClass;
7329 abtsiocb->vport = phba->pport;
7330
7331 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7332 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
7333
7334 if (lpfc_is_link_up(phba))
7335 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
7336 else
7337 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
7338
7339 /* Setup callback routine and issue the command. */
7340 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
7341 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
7342 abtsiocb, 0);
7343 if (ret_val == IOCB_ERROR) {
7344 lpfc_sli_release_iocbq(phba, abtsiocb);
7345 errcnt++;
7346 continue;
7347 }
7348 }
7349
7350 return errcnt;
7351}
7352
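/*
 * Illustrative sketch (not part of the driver): issuing target-wide aborts
 * from a reset-style path. The FCP ring is selected through psli->fcp_ring;
 * the return value counts the iocbs the routine failed to abort. "failed"
 * is a hypothetical local.
 *
 *	pring = &phba->sli.ring[phba->sli.fcp_ring];
 *	failed = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
 */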
7353/**
7354 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
7355 * @phba: Pointer to HBA context object.
7356 * @cmdiocbq: Pointer to command iocb.
7357 * @rspiocbq: Pointer to response iocb.
7358 *
7359 * This function is the completion handler for iocbs issued using
7360 * lpfc_sli_issue_iocb_wait function. This function is called by the
7361 * ring event handler function without any lock held. This function
7362 * can be called from both worker thread context and interrupt
7363 * context. This function also can be called from other thread which
7364 * cleans up the SLI layer objects.
7365 * This function copies the contents of the response iocb to the
7366 * response iocb memory object provided by the caller of
7367 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
7368 * sleeps for the iocb completion.
7369 **/
7370static void
7371lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7372 struct lpfc_iocbq *cmdiocbq,
7373 struct lpfc_iocbq *rspiocbq)
7374{
7375 wait_queue_head_t *pdone_q;
7376 unsigned long iflags;
7377
7378 spin_lock_irqsave(&phba->hbalock, iflags);
7379 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
7380 if (cmdiocbq->context2 && rspiocbq)
7381 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
7382 &rspiocbq->iocb, sizeof(IOCB_t));
7383
7384 pdone_q = cmdiocbq->context_un.wait_queue;
7385 if (pdone_q)
7386 wake_up(pdone_q);
7387 spin_unlock_irqrestore(&phba->hbalock, iflags);
7388 return;
7389}
7390
7391/**
7392 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
7393 * @phba: Pointer to HBA context object.
7394 * @piocbq: Pointer to command iocb.
7395 * @flag: Flag to test.
7396 *
7397 * This routine grabs the hbalock and then tests the iocb_flag to
7398 * see if the passed in flag is set.
7399 * Returns:
7400 * 1 if flag is set.
7401 * 0 if flag is not set.
7402 **/
7403static int
7404lpfc_chk_iocb_flg(struct lpfc_hba *phba,
7405 struct lpfc_iocbq *piocbq, uint32_t flag)
7406{
7407 unsigned long iflags;
7408 int ret;
7409
7410 spin_lock_irqsave(&phba->hbalock, iflags);
7411 ret = piocbq->iocb_flag & flag;
7412 spin_unlock_irqrestore(&phba->hbalock, iflags);
7413 return ret;
7414
7415}
7416
7417/**
7418 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
7419 * @phba: Pointer to HBA context object.
7420 * @pring: Pointer to sli ring.
7421 * @piocb: Pointer to command iocb.
7422 * @prspiocbq: Pointer to response iocb.
7423 * @timeout: Timeout in number of seconds.
7424 *
7425 * This function issues the iocb to firmware and waits for the
7426 * iocb to complete. If the iocb command is not
7427 * completed within timeout seconds, it returns IOCB_TIMEDOUT.
7428 * Caller should not free the iocb resources if this function
7429 * returns IOCB_TIMEDOUT.
7430 * The function waits for the iocb completion using a
7431 * non-interruptible wait.
7432 * This function will sleep while waiting for iocb completion.
7433 * So, this function should not be called from any context which
7434 * does not allow sleeping. Due to the same reason, this function
7435 * cannot be called with interrupt disabled.
7436 * This function assumes that the iocb completions occur while
7437 * this function sleeps. So, this function cannot be called from
7438 * the thread which processes iocb completion for this ring.
7439 * This function clears the iocb_flag of the iocb object before
7440 * issuing the iocb and the iocb completion handler sets this
7441 * flag and wakes this thread when the iocb completes.
7442 * The contents of the response iocb will be copied to prspiocbq
7443 * by the completion handler when the command completes.
7444 * This function returns IOCB_SUCCESS when success.
7445 * This function is called with no lock held.
7446 **/
7447int
7448lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
7449 uint32_t ring_number,
7450 struct lpfc_iocbq *piocb,
7451 struct lpfc_iocbq *prspiocbq,
7452 uint32_t timeout)
7453{
7454 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
7455 long timeleft, timeout_req = 0;
7456 int retval = IOCB_SUCCESS;
7457 uint32_t creg_val;
7458
7459 /*
7460 * If the caller has provided a response iocbq buffer, then context2
7461 * must be NULL; otherwise it is an error.
7462 */
7463 if (prspiocbq) {
7464 if (piocb->context2)
7465 return IOCB_ERROR;
7466 piocb->context2 = prspiocbq;
7467 }
7468
7469 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
7470 piocb->context_un.wait_queue = &done_q;
7471 piocb->iocb_flag &= ~LPFC_IO_WAKE;
7472
7473 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7474 creg_val = readl(phba->HCregaddr);
7475 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
7476 writel(creg_val, phba->HCregaddr);
7477 readl(phba->HCregaddr); /* flush */
7478 }
7479
7480 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
7481 if (retval == IOCB_SUCCESS) {
7482 timeout_req = timeout * HZ;
7483 timeleft = wait_event_timeout(done_q,
7484 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
7485 timeout_req);
7486
7487 if (piocb->iocb_flag & LPFC_IO_WAKE) {
7488 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7489 "0331 IOCB wake signaled\n");
7490 } else if (timeleft == 0) {
7491 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7492 "0338 IOCB wait timeout error - no "
7493 "wake response Data x%x\n", timeout);
7494 retval = IOCB_TIMEDOUT;
7495 } else {
7496 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7497 "0330 IOCB wake NOT set, "
7498 "Data x%x x%lx\n",
7499 timeout, timeleft);
7500 retval = IOCB_TIMEDOUT;
7501 }
7502 } else {
7503 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7504 "0332 IOCB wait issue failed, Data x%x\n",
7505 retval);
7506 retval = IOCB_ERROR;
7507 }
7508
7509 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7510 creg_val = readl(phba->HCregaddr);
7511 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
7512 writel(creg_val, phba->HCregaddr);
7513 readl(phba->HCregaddr); /* flush */
7514 }
7515
7516 if (prspiocbq)
7517 piocb->context2 = NULL;
7518
7519 piocb->context_un.wait_queue = NULL;
7520 piocb->iocb_cmpl = NULL;
7521 return retval;
7522}
7523
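/*
 * Illustrative sketch (not part of the driver): synchronous issue with a
 * 30 second wait. On IOCB_TIMEDOUT the iocb still belongs to the wake
 * path, so the caller must not free "cmd" in that case. The setup of
 * "cmd" is hypothetical and elided.
 *
 *	rsp = lpfc_sli_get_iocbq(phba);
 *	status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmd, rsp, 30);
 *	if (status != IOCB_TIMEDOUT)
 *		lpfc_sli_release_iocbq(phba, cmd);
 *	if (rsp)
 *		lpfc_sli_release_iocbq(phba, rsp);
 */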
7524/**
7525 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
7526 * @phba: Pointer to HBA context object.
7527 * @pmboxq: Pointer to driver mailbox object.
7528 * @timeout: Timeout in number of seconds.
7529 *
7530 * This function issues the mailbox to firmware and waits for the
7531 * mailbox command to complete. If the mailbox command is not
7532 * completed within timeout seconds, it returns MBX_TIMEOUT.
7533 * The function waits for the mailbox completion using an
7534 * interruptible wait. If the thread is woken up due to a
7535 * signal, MBX_TIMEOUT error is returned to the caller. Caller
7536 * should not free the mailbox resources, if this function returns
7537 * MBX_TIMEOUT.
7538 * This function will sleep while waiting for mailbox completion.
7539 * So, this function should not be called from any context which
7540 * does not allow sleeping. Due to the same reason, this function
7541 * cannot be called with interrupt disabled.
7542 * This function assumes that the mailbox completion occurs while
7543 * this function sleeps. So, this function cannot be called from
7544 * the worker thread which processes mailbox completion.
7545 * This function is called in the context of HBA management
7546 * applications.
7547 * This function returns MBX_SUCCESS when successful.
7548 * This function is called with no lock held.
7549 **/
7550int
7551lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
7552 uint32_t timeout)
7553{
7554 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
7555 int retval;
7556 unsigned long flag;
7557
7558 /* The caller must leave context1 empty. */
7559 if (pmboxq->context1)
7560 return MBX_NOT_FINISHED;
7561
7562 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
7563 /* setup wake call as the mailbox completion callback */
7564 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
7565 /* setup context field to pass wait_queue pointer to wake function */
7566 pmboxq->context1 = &done_q;
7567
7568 /* now issue the command */
7569 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
7570
7571 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
7572 wait_event_interruptible_timeout(done_q,
7573 pmboxq->mbox_flag & LPFC_MBX_WAKE,
7574 timeout * HZ);
7575
7576 spin_lock_irqsave(&phba->hbalock, flag);
7577 pmboxq->context1 = NULL;
7578 /*
7579 * if LPFC_MBX_WAKE flag is set the mailbox is completed
7580 * else do not free the resources.
7581 */
7582 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
7583 retval = MBX_SUCCESS;
7584 else {
7585 retval = MBX_TIMEOUT;
7586 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7587 }
7588 spin_unlock_irqrestore(&phba->hbalock, flag);
7589 }
7590
7591 return retval;
7592}
7593
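/*
 * Illustrative sketch (not part of the driver): synchronous mailbox use.
 * MBX_TIMEOUT means the wake path still owns pmboxq, so the caller must
 * not free it in that case. "setup_cmd" is a hypothetical helper.
 *
 *	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (pmboxq) {
 *		setup_cmd(pmboxq);
 *		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *		if (rc != MBX_TIMEOUT)
 *			mempool_free(pmboxq, phba->mbox_mem_pool);
 *	}
 */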
7594/**
7595 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
7596 * @phba: Pointer to HBA context.
7597 *
7598 * This function is called to shutdown the driver's mailbox sub-system.
7599 * It first marks the mailbox sub-system is in a block state to prevent
7600 * the asynchronous mailbox command from issued off the pending mailbox
7601 * command queue. If the mailbox command sub-system shutdown is due to
7602 * HBA error conditions such as EEH or ERATT, this routine shall invoke
7603 * the mailbox sub-system flush routine to forcefully bring down the
7604 * mailbox sub-system. Otherwise, if it is due to normal condition (such
7605 * as with offline or HBA function reset), this routine will wait for the
7606 * outstanding mailbox command to complete before invoking the mailbox
7607 * sub-system flush routine to gracefully bring down mailbox sub-system.
7608 **/
7609void
7610lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
7611{
7612 struct lpfc_sli *psli = &phba->sli;
7613 uint8_t actcmd = MBX_HEARTBEAT;
7614 unsigned long timeout;
7615
7616 spin_lock_irq(&phba->hbalock);
7617 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7618 spin_unlock_irq(&phba->hbalock);
7619
7620 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7621 spin_lock_irq(&phba->hbalock);
7622 if (phba->sli.mbox_active)
7623 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
7624 spin_unlock_irq(&phba->hbalock);
7625 /* Determine how long we might wait for the active mailbox
7626 * command to be gracefully completed by firmware.
7627 */
7628 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
7629 1000) + jiffies;
7630 while (phba->sli.mbox_active) {
7631 /* Check active mailbox complete status every 2ms */
7632 msleep(2);
7633 if (time_after(jiffies, timeout))
7634 /* Timeout, let the mailbox flush routine to
7635 * forcefully release active mailbox command
7636 */
7637 break;
7638 }
7639 }
7640 lpfc_sli_mbox_sys_flush(phba);
7641}
7642
7643/**
7644 * lpfc_sli_eratt_read - read sli-3 error attention events
7645 * @phba: Pointer to HBA context.
7646 *
7647 * This function is called to read the SLI3 device error attention registers
7648 * for possible error attention events. The caller must hold the hbalock
7649 * with spin_lock_irq().
7650 *
7651 * This function returns 1 when there is Error Attention in the Host Attention
7652 * Register and returns 0 otherwise.
7653 **/
7654static int
7655lpfc_sli_eratt_read(struct lpfc_hba *phba)
7656{
7657 uint32_t ha_copy;
JS
7659 /* Read chip Host Attention (HA) register */
7660 ha_copy = readl(phba->HAregaddr);
7661 if (ha_copy & HA_ERATT) {
7662 /* Read host status register to retrieve error event */
7663 lpfc_sli_read_hs(phba);
7664
7665 /* Check if a deferred error condition is active */
7666 if ((HS_FFER1 & phba->work_hs) &&
7667 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7668 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7669 phba->hba_flag |= DEFER_ERATT;
7670 /* Clear all interrupt enable conditions */
7671 writel(0, phba->HCregaddr);
7672 readl(phba->HCregaddr);
7673 }
7674
7675 /* Set the driver HA work bitmap */
7676 phba->work_ha |= HA_ERATT;
7677 /* Indicate polling handles this ERATT */
7678 phba->hba_flag |= HBA_ERATT_HANDLED;
7679 return 1;
7680 }
7681 return 0;
7682}
7683
7684/**
7685 * lpfc_sli4_eratt_read - read sli-4 error attention events
7686 * @phba: Pointer to HBA context.
7687 *
7688 * This function is called to read the SLI4 device error attention registers
7689 * for possible error attention events. The caller must hold the hbalock
7690 * with spin_lock_irq().
7691 *
7692 * This function returns 1 when there is Error Attention in the Host Attention
7693 * Register and returns 0 otherwise.
7694 **/
7695static int
7696lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7697{
7698 uint32_t uerr_sta_hi, uerr_sta_lo;
7699
7700 /* For now, use the SLI4 device internal unrecoverable error
7701 * registers for error attention. This can be changed later.
7702 */
7703 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7704 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7705 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
7706 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
7707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7708 "1423 HBA Unrecoverable error: "
7709 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7710 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
7711 uerr_sta_lo, uerr_sta_hi,
7712 phba->sli4_hba.ue_mask_lo,
7713 phba->sli4_hba.ue_mask_hi);
7714 phba->work_status[0] = uerr_sta_lo;
7715 phba->work_status[1] = uerr_sta_hi;
7716 /* Set the driver HA work bitmap */
7717 phba->work_ha |= HA_ERATT;
7718 /* Indicate polling handles this ERATT */
7719 phba->hba_flag |= HBA_ERATT_HANDLED;
7720 return 1;
7721 }
7722 return 0;
7723}
7724
7725/**
7726 * lpfc_sli_check_eratt - check error attention events
7727 * @phba: Pointer to HBA context.
7728 *
7729 * This function is called from timer soft interrupt context to check HBA's
7730 * error attention register bit for error attention events.
7731 *
7732 * This function returns 1 when there is Error Attention in the Host Attention
7733 * Register and returns 0 otherwise.
7734 **/
7735int
7736lpfc_sli_check_eratt(struct lpfc_hba *phba)
7737{
7738 uint32_t ha_copy;
7739
7740 /* If somebody is waiting to handle an eratt, don't process it
7741 * here. The brdkill function will do this.
7742 */
7743 if (phba->link_flag & LS_IGNORE_ERATT)
7744 return 0;
7745
7746 /* Check if interrupt handler handles this ERATT */
7747 spin_lock_irq(&phba->hbalock);
7748 if (phba->hba_flag & HBA_ERATT_HANDLED) {
7749 /* Interrupt handler has handled ERATT */
7750 spin_unlock_irq(&phba->hbalock);
7751 return 0;
7752 }
7753
7754 /*
7755 * If there is deferred error attention, do not check for error
7756 * attention
7757 */
7758 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7759 spin_unlock_irq(&phba->hbalock);
7760 return 0;
7761 }
7762
7763 /* If PCI channel is offline, don't process it */
7764 if (unlikely(pci_channel_offline(phba->pcidev))) {
7765 spin_unlock_irq(&phba->hbalock);
7766 return 0;
7767 }
7768
7769 switch (phba->sli_rev) {
7770 case LPFC_SLI_REV2:
7771 case LPFC_SLI_REV3:
7772 /* Read chip Host Attention (HA) register */
7773 ha_copy = lpfc_sli_eratt_read(phba);
7774 break;
7775 case LPFC_SLI_REV4:
7776 /* Read device Unrecoverable Error (UERR) registers */
7777 ha_copy = lpfc_sli4_eratt_read(phba);
7778 break;
7779 default:
7780 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7781 "0299 Invalid SLI revision (%d)\n",
7782 phba->sli_rev);
7783 ha_copy = 0;
7784 break;
7785 }
7786 spin_unlock_irq(&phba->hbalock);
7787
7788 return ha_copy;
7789}
7790
7791/**
7792 * lpfc_intr_state_check - Check device state for interrupt handling
7793 * @phba: Pointer to HBA context.
7794 *
7795 * This inline routine checks whether a device or its PCI slot is in a state
7796 * in which the interrupt should be handled.
7797 *
7798 * This function returns 0 if the device or the PCI slot is in a state that
7799 * interrupt should be handled, otherwise -EIO.
7800 */
7801static inline int
7802lpfc_intr_state_check(struct lpfc_hba *phba)
7803{
7804 /* If the pci channel is offline, ignore all the interrupts */
7805 if (unlikely(pci_channel_offline(phba->pcidev)))
7806 return -EIO;
7807
7808 /* Update device level interrupt statistics */
7809 phba->sli.slistat.sli_intr++;
7810
7811 /* Ignore all interrupts during initialization. */
7812 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7813 return -EIO;
7814
7815 return 0;
7816}
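
/*
 * Usage sketch (illustrative): each interrupt entry point below performs
 * this check first and bails out when the device is not in a state to
 * service interrupts:
 *
 *	if (lpfc_intr_state_check(phba))
 *		return IRQ_NONE;
 */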
7817
7818/**
7819 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler for SLI-3 devices
7820 * @irq: Interrupt number.
7821 * @dev_id: The device context pointer.
7822 *
7823 * This function is directly called from the PCI layer as an interrupt
7824 * service routine when device with SLI-3 interface spec is enabled with
7825 * MSI-X multi-message interrupt mode and there are slow-path events in
7826 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
7827 * interrupt mode, this function is called as part of the device-level
7828 * interrupt handler. When the PCI slot is in error recovery or the HBA
7829 * is undergoing initialization, the interrupt handler will not process
7830 * the interrupt. The link attention and ELS ring attention events are
7831 * handled by the worker thread. The interrupt handler signals the worker
7832 * thread and returns for these events. This function is called without
7833 * any lock held. It gets the hbalock to access and update SLI data
7834 * structures.
7835 *
7836 * This function returns IRQ_HANDLED when interrupt is handled else it
7837 * returns IRQ_NONE.
7838 **/
7839irqreturn_t
7840lpfc_sli_sp_intr_handler(int irq, void *dev_id)
7841{
7842 struct lpfc_hba *phba;
7843 uint32_t ha_copy, hc_copy;
7844 uint32_t work_ha_copy;
7845 unsigned long status;
7846 unsigned long iflag;
7847 uint32_t control;
7848
7849 MAILBOX_t *mbox, *pmbox;
7850 struct lpfc_vport *vport;
7851 struct lpfc_nodelist *ndlp;
7852 struct lpfc_dmabuf *mp;
7853 LPFC_MBOXQ_t *pmb;
7854 int rc;
7855
7856 /*
7857 * Get the driver's phba structure from the dev_id and
7858 * assume the HBA is not interrupting.
7859 */
7860 phba = (struct lpfc_hba *)dev_id;
7861
7862 if (unlikely(!phba))
7863 return IRQ_NONE;
7864
7865 /*
7866 * Stuff needs to be attended to when this function is invoked as an
7867 * individual interrupt handler in MSI-X multi-message interrupt mode
7868 */
7869 if (phba->intr_type == MSIX) {
7870 /* Check device state for handling interrupt */
7871 if (lpfc_intr_state_check(phba))
7872 return IRQ_NONE;
7873 /* Need to read HA REG for slow-path events */
7874 spin_lock_irqsave(&phba->hbalock, iflag);
7875 ha_copy = readl(phba->HAregaddr);
7876 /* If somebody is waiting to handle an eratt don't process it
7877 * here. The brdkill function will do this.
7878 */
7879 if (phba->link_flag & LS_IGNORE_ERATT)
7880 ha_copy &= ~HA_ERATT;
7881 /* Check the need for handling ERATT in interrupt handler */
7882 if (ha_copy & HA_ERATT) {
7883 if (phba->hba_flag & HBA_ERATT_HANDLED)
7884 /* ERATT polling has handled ERATT */
7885 ha_copy &= ~HA_ERATT;
7886 else
7887 /* Indicate interrupt handler handles ERATT */
7888 phba->hba_flag |= HBA_ERATT_HANDLED;
7889 }
7890
7891 /*
7892 * If there is deferred error attention, do not check for any
7893 * interrupt.
7894 */
7895 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7896 spin_unlock_irqrestore(&phba->hbalock, iflag);
7897 return IRQ_NONE;
7898 }
7899
9399627f 7900 /* Clear up only attention source related to slow-path */
7901 hc_copy = readl(phba->HCregaddr);
7902 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
7903 HC_LAINT_ENA | HC_ERINT_ENA),
7904 phba->HCregaddr);
7905 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
7906 phba->HAregaddr);
7907 writel(hc_copy, phba->HCregaddr);
7908 readl(phba->HAregaddr); /* flush */
7909 spin_unlock_irqrestore(&phba->hbalock, iflag);
7910 } else
7911 ha_copy = phba->ha_copy;
7912
7913 work_ha_copy = ha_copy & phba->work_ha_mask;
7914
7915 if (work_ha_copy) {
7916 if (work_ha_copy & HA_LATT) {
7917 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
7918 /*
7919 * Turn off Link Attention interrupts
7920 * until CLEAR_LA done
7921 */
7922 spin_lock_irqsave(&phba->hbalock, iflag);
7923 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
7924 control = readl(phba->HCregaddr);
7925 control &= ~HC_LAINT_ENA;
7926 writel(control, phba->HCregaddr);
7927 readl(phba->HCregaddr); /* flush */
7928 spin_unlock_irqrestore(&phba->hbalock, iflag);
7929 }
7930 else
7931 work_ha_copy &= ~HA_LATT;
7932 }
7933
7934 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
7935 /*
7936 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
7937 * the only slow ring.
7938 */
7939 status = (work_ha_copy &
7940 (HA_RXMASK << (4*LPFC_ELS_RING)));
7941 status >>= (4*LPFC_ELS_RING);
7942 if (status & HA_RXMASK) {
7943 spin_lock_irqsave(&phba->hbalock, iflag);
7944 control = readl(phba->HCregaddr);
7945
7946 lpfc_debugfs_slow_ring_trc(phba,
7947 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
7948 control, status,
7949 (uint32_t)phba->sli.slistat.sli_intr);
7950
7951 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
7952 lpfc_debugfs_slow_ring_trc(phba,
7953 "ISR Disable ring:"
7954 "pwork:x%x hawork:x%x wait:x%x",
7955 phba->work_ha, work_ha_copy,
7956 (uint32_t)((unsigned long)
7957 &phba->work_waitq));
7958
7959 control &=
7960 ~(HC_R0INT_ENA << LPFC_ELS_RING);
7961 writel(control, phba->HCregaddr);
7962 readl(phba->HCregaddr); /* flush */
dea3101e 7963 }
7964 else {
7965 lpfc_debugfs_slow_ring_trc(phba,
7966 "ISR slow ring: pwork:"
7967 "x%x hawork:x%x wait:x%x",
7968 phba->work_ha, work_ha_copy,
7969 (uint32_t)((unsigned long)
7970 &phba->work_waitq));
7971 }
7972 spin_unlock_irqrestore(&phba->hbalock, iflag);
7973 }
7974 }
7975 spin_lock_irqsave(&phba->hbalock, iflag);
7976 if (work_ha_copy & HA_ERATT) {
7977 lpfc_sli_read_hs(phba);
7978 /*
7979 * Check if a deferred error condition
7980 * is active
7981 */
7982 if ((HS_FFER1 & phba->work_hs) &&
7983 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7984 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7985 phba->hba_flag |= DEFER_ERATT;
7986 /* Clear all interrupt enable conditions */
7987 writel(0, phba->HCregaddr);
7988 readl(phba->HCregaddr);
7989 }
7990 }
7991
7992 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
7993 pmb = phba->sli.mbox_active;
7994 pmbox = &pmb->u.mb;
7995 mbox = phba->mbox;
7996 vport = pmb->vport;
7997
7998 /* First check out the status word */
7999 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
8000 if (pmbox->mbxOwner != OWN_HOST) {
8001 spin_unlock_irqrestore(&phba->hbalock, iflag);
8002 /*
8003 * Stray Mailbox Interrupt, mbxCommand <cmd>
8004 * mbxStatus <status>
8005 */
8006 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8007 LOG_SLI,
8008 "(%d):0304 Stray Mailbox "
8009 "Interrupt mbxCommand x%x "
8010 "mbxStatus x%x\n",
8011 (vport ? vport->vpi : 0),
8012 pmbox->mbxCommand,
8013 pmbox->mbxStatus);
8014 /* clear mailbox attention bit */
8015 work_ha_copy &= ~HA_MBATT;
8016 } else {
8017 phba->sli.mbox_active = NULL;
8018 spin_unlock_irqrestore(&phba->hbalock, iflag);
8019 phba->last_completion_time = jiffies;
8020 del_timer(&phba->sli.mbox_tmo);
8021 if (pmb->mbox_cmpl) {
8022 lpfc_sli_pcimem_bcopy(mbox, pmbox,
8023 MAILBOX_CMD_SIZE);
8024 }
8025 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8026 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8027
8028 lpfc_debugfs_disc_trc(vport,
8029 LPFC_DISC_TRC_MBOX_VPORT,
8030 "MBOX dflt rpi: : "
8031 "status:x%x rpi:x%x",
8032 (uint32_t)pmbox->mbxStatus,
8033 pmbox->un.varWords[0], 0);
8034
8035 if (!pmbox->mbxStatus) {
8036 mp = (struct lpfc_dmabuf *)
8037 (pmb->context1);
8038 ndlp = (struct lpfc_nodelist *)
8039 pmb->context2;
8040
8041 /* Reg_LOGIN of dflt RPI was
8042 * successful. Now let's get
8043 * rid of the RPI using the
8044 * same mbox buffer.
8045 */
8046 lpfc_unreg_login(phba,
8047 vport->vpi,
8048 pmbox->un.varWords[0],
8049 pmb);
8050 pmb->mbox_cmpl =
8051 lpfc_mbx_cmpl_dflt_rpi;
8052 pmb->context1 = mp;
8053 pmb->context2 = ndlp;
8054 pmb->vport = vport;
8055 rc = lpfc_sli_issue_mbox(phba,
8056 pmb,
8057 MBX_NOWAIT);
8058 if (rc != MBX_BUSY)
8059 lpfc_printf_log(phba,
8060 KERN_ERR,
8061 LOG_MBOX | LOG_SLI,
8062 "0350 rc should have "
8063 "been MBX_BUSY\n");
8064 if (rc != MBX_NOT_FINISHED)
8065 goto send_current_mbox;
09372820 8066 }
858c9f6c 8067 }
8068 spin_lock_irqsave(
8069 &phba->pport->work_port_lock,
8070 iflag);
8071 phba->pport->work_port_events &=
8072 ~WORKER_MBOX_TMO;
8073 spin_unlock_irqrestore(
8074 &phba->pport->work_port_lock,
8075 iflag);
8076 lpfc_mbox_cmpl_put(phba, pmb);
8077 }
8078 } else
8079 spin_unlock_irqrestore(&phba->hbalock, iflag);
8080
8081 if ((work_ha_copy & HA_MBATT) &&
8082 (phba->sli.mbox_active == NULL)) {
8083send_current_mbox:
8084 /* Process next mailbox command if there is one */
8085 do {
8086 rc = lpfc_sli_issue_mbox(phba, NULL,
8087 MBX_NOWAIT);
8088 } while (rc == MBX_NOT_FINISHED);
8089 if (rc != MBX_SUCCESS)
8090 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8091 LOG_SLI, "0349 rc should be "
8092 "MBX_SUCCESS\n");
8093 }
8094
8095 spin_lock_irqsave(&phba->hbalock, iflag);
8096 phba->work_ha |= work_ha_copy;
8097 spin_unlock_irqrestore(&phba->hbalock, iflag);
8098 lpfc_worker_wake_up(phba);
8099 }
8100 return IRQ_HANDLED;
8101
8102} /* lpfc_sli_sp_intr_handler */
8103
8104/**
8105 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler for SLI-3 devices
8106 * @irq: Interrupt number.
8107 * @dev_id: The device context pointer.
8108 *
8109 * This function is directly called from the PCI layer as an interrupt
8110 * service routine when device with SLI-3 interface spec is enabled with
8111 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
8112 * ring event in the HBA. However, when the device is enabled with either
8113 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
8114 * device-level interrupt handler. When the PCI slot is in error recovery
8115 * or the HBA is undergoing initialization, the interrupt handler will not
8116 * process the interrupt. The SCSI FCP fast-path ring events are handled in
8117 * the interrupt context. This function is called without any lock held.
8118 * It gets the hbalock to access and update SLI data structures.
8119 *
8120 * This function returns IRQ_HANDLED when interrupt is handled else it
8121 * returns IRQ_NONE.
8122 **/
8123irqreturn_t
8124lpfc_sli_fp_intr_handler(int irq, void *dev_id)
8125{
8126 struct lpfc_hba *phba;
8127 uint32_t ha_copy;
8128 unsigned long status;
8129 unsigned long iflag;
8130
8131 /* Get the driver's phba structure from the dev_id and
8132 * assume the HBA is not interrupting.
8133 */
8134 phba = (struct lpfc_hba *) dev_id;
8135
8136 if (unlikely(!phba))
8137 return IRQ_NONE;
8138
8139 /*
8140 * Stuff needs to be attended to when this function is invoked as an
8141 * individual interrupt handler in MSI-X multi-message interrupt mode
8142 */
8143 if (phba->intr_type == MSIX) {
8144 /* Check device state for handling interrupt */
8145 if (lpfc_intr_state_check(phba))
8146 return IRQ_NONE;
8147 /* Need to read HA REG for FCP ring and other ring events */
8148 ha_copy = readl(phba->HAregaddr);
8149 /* Clear up only attention source related to fast-path */
8150 spin_lock_irqsave(&phba->hbalock, iflag);
8151 /*
8152 * If there is deferred error attention, do not check for
8153 * any interrupt.
8154 */
8155 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8156 spin_unlock_irqrestore(&phba->hbalock, iflag);
8157 return IRQ_NONE;
8158 }
8159 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
8160 phba->HAregaddr);
8161 readl(phba->HAregaddr); /* flush */
8162 spin_unlock_irqrestore(&phba->hbalock, iflag);
8163 } else
8164 ha_copy = phba->ha_copy;
8165
8166 /*
8167 * Process all events on FCP ring. Take the optimized path for FCP IO.
8168 */
8169 ha_copy &= ~(phba->work_ha_mask);
8170
8171 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
8172 status >>= (4*LPFC_FCP_RING);
8173 if (status & HA_RXMASK)
8174 lpfc_sli_handle_fast_ring_event(phba,
8175 &phba->sli.ring[LPFC_FCP_RING],
8176 status);
8177
8178 if (phba->cfg_multi_ring_support == 2) {
8179 /*
8180 * Process all events on extra ring. Take the optimized path
8181 * for extra ring IO.
8182 */
8183 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
8184 status >>= (4*LPFC_EXTRA_RING);
8185 if (status & HA_RXMASK) {
8186 lpfc_sli_handle_fast_ring_event(phba,
8187 &phba->sli.ring[LPFC_EXTRA_RING],
8188 status);
8189 }
8190 }
8191 return IRQ_HANDLED;
8192} /* lpfc_sli_fp_intr_handler */
8193
8194/**
8195 * lpfc_sli_intr_handler - Device-level interrupt handler for SLI-3 devices
8196 * @irq: Interrupt number.
8197 * @dev_id: The device context pointer.
8198 *
8199 * This function is the HBA device-level interrupt handler to device with
8200 * SLI-3 interface spec, called from the PCI layer when either MSI or
8201 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
8202 * requires driver attention. This function invokes the slow-path interrupt
8203 * attention handling function and fast-path interrupt attention handling
8204 * function in turn to process the relevant HBA attention events. This
8205 * function is called without any lock held. It gets the hbalock to access
8206 * and update SLI data structures.
8207 *
8208 * This function returns IRQ_HANDLED when interrupt is handled, else it
8209 * returns IRQ_NONE.
8210 **/
8211irqreturn_t
8212lpfc_sli_intr_handler(int irq, void *dev_id)
8213{
8214 struct lpfc_hba *phba;
8215 irqreturn_t sp_irq_rc, fp_irq_rc;
8216 unsigned long status1, status2;
8217 uint32_t hc_copy;
8218
8219 /*
8220 * Get the driver's phba structure from the dev_id and
8221 * assume the HBA is not interrupting.
8222 */
8223 phba = (struct lpfc_hba *) dev_id;
8224
8225 if (unlikely(!phba))
8226 return IRQ_NONE;
8227
8228 /* Check device state for handling interrupt */
8229 if (lpfc_intr_state_check(phba))
8230 return IRQ_NONE;
8231
8232 spin_lock(&phba->hbalock);
8233 phba->ha_copy = readl(phba->HAregaddr);
8234 if (unlikely(!phba->ha_copy)) {
8235 spin_unlock(&phba->hbalock);
8236 return IRQ_NONE;
8237 } else if (phba->ha_copy & HA_ERATT) {
8238 if (phba->hba_flag & HBA_ERATT_HANDLED)
8239 /* ERATT polling has handled ERATT */
8240 phba->ha_copy &= ~HA_ERATT;
8241 else
8242 /* Indicate interrupt handler handles ERATT */
8243 phba->hba_flag |= HBA_ERATT_HANDLED;
8244 }
8245
8246 /*
8247 * If there is deferred error attention, do not check for any interrupt.
8248 */
8249 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8250 spin_unlock(&phba->hbalock);
8251 return IRQ_NONE;
8252 }
8253
8254 /* Clear attention sources except link and error attentions */
8255 hc_copy = readl(phba->HCregaddr);
8256 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
8257 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
8258 phba->HCregaddr);
8259 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
8260 writel(hc_copy, phba->HCregaddr);
8261 readl(phba->HAregaddr); /* flush */
8262 spin_unlock(&phba->hbalock);
8263
8264 /*
8265 * Invokes slow-path host attention interrupt handling as appropriate.
8266 */
8267
8268 /* status of events with mailbox and link attention */
8269 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
8270
8271 /* status of events with ELS ring */
8272 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
8273 status2 >>= (4*LPFC_ELS_RING);
8274
8275 if (status1 || (status2 & HA_RXMASK))
8276 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
8277 else
8278 sp_irq_rc = IRQ_NONE;
8279
8280 /*
8281 * Invoke fast-path host attention interrupt handling as appropriate.
8282 */
8283
8284 /* status of events with FCP ring */
8285 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
8286 status1 >>= (4*LPFC_FCP_RING);
8287
8288 /* status of events with extra ring */
8289 if (phba->cfg_multi_ring_support == 2) {
8290 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
8291 status2 >>= (4*LPFC_EXTRA_RING);
8292 } else
8293 status2 = 0;
8294
8295 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
8296 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
8297 else
8298 fp_irq_rc = IRQ_NONE;
8299
8300 /* Return device-level interrupt handling status */
8301 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
8302} /* lpfc_sli_intr_handler */
8303
8304/**
8305 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
8306 * @phba: pointer to lpfc hba data structure.
8307 *
8308 * This routine is invoked by the worker thread to process all the pending
8309 * SLI4 FCP XRI abort events.
8310 **/
8311void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
8312{
8313 struct lpfc_cq_event *cq_event;
8314
8315 /* First, declare the fcp xri abort event has been handled */
8316 spin_lock_irq(&phba->hbalock);
8317 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
8318 spin_unlock_irq(&phba->hbalock);
8319 /* Now, handle all the fcp xri abort events */
8320 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
8321 /* Get the first event from the head of the event queue */
8322 spin_lock_irq(&phba->hbalock);
8323 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8324 cq_event, struct lpfc_cq_event, list);
8325 spin_unlock_irq(&phba->hbalock);
8326 /* Notify aborted XRI for FCP work queue */
8327 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8328 /* Free the event processed back to the free pool */
8329 lpfc_sli4_cq_event_release(phba, cq_event);
8330 }
8331}
8332
8333/**
8334 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
8335 * @phba: pointer to lpfc hba data structure.
8336 *
8337 * This routine is invoked by the worker thread to process all the pending
8338 * SLI4 ELS XRI abort events.
8339 **/
8340void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8341{
8342 struct lpfc_cq_event *cq_event;
8343
8344 /* First, declare the els xri abort event has been handled */
8345 spin_lock_irq(&phba->hbalock);
8346 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
8347 spin_unlock_irq(&phba->hbalock);
8348 /* Now, handle all the els xri abort events */
8349 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
8350 /* Get the first event from the head of the event queue */
8351 spin_lock_irq(&phba->hbalock);
8352 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8353 cq_event, struct lpfc_cq_event, list);
8354 spin_unlock_irq(&phba->hbalock);
8355 /* Notify aborted XRI for ELS work queue */
8356 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8357 /* Free the event processed back to the free pool */
8358 lpfc_sli4_cq_event_release(phba, cq_event);
8359 }
8360}
8361
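/**
 * lpfc_sli4_iocb_param_transfer - Transfer a WCQE completion into an iocb
 * @pIocbIn: Pointer to the response (pseudo) IOCBQ to be populated.
 * @pIocbOut: Pointer to the original command IOCBQ.
 * @wcqe: Pointer to the work-queue completion queue entry.
 *
 * This routine copies the iocb portion of the command IOCBQ into the
 * response IOCBQ and maps the WCQE completion status and parameters into
 * the response iocb fields expected by SLI-3 style completion handlers.
 **/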
8362static void
8363lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8364 struct lpfc_iocbq *pIocbOut,
8365 struct lpfc_wcqe_complete *wcqe)
8366{
8367 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8368
8369 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8370 sizeof(struct lpfc_iocbq) - offset);
8371 /* Map WCQE parameters into irspiocb parameters */
8372 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8373 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
8374 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
8375 pIocbIn->iocb.un.fcpi.fcpi_parm =
8376 pIocbOut->iocb.un.fcpi.fcpi_parm -
8377 wcqe->total_data_placed;
8378 else
8379 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8380 else
8381 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8382}
8383
8384/**
8385 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
8386 * @phba: Pointer to HBA context object.
8387 * @wcqe: Pointer to work-queue completion queue entry.
8388 *
8389 * This routine handles an ELS work-queue completion event and constructs
8390 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
8391 * discovery engine to handle.
8392 *
8393 * Return: Pointer to the receive IOCBQ, NULL otherwise.
8394 **/
8395static struct lpfc_iocbq *
8396lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
8397 struct lpfc_iocbq *irspiocbq)
8398{
8399 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8400 struct lpfc_iocbq *cmdiocbq;
8401 struct lpfc_wcqe_complete *wcqe;
8402 unsigned long iflags;
8403
8404 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
8405 spin_lock_irqsave(&phba->hbalock, iflags);
8406 pring->stats.iocb_event++;
8407 /* Look up the ELS command IOCB and create pseudo response IOCB */
8408 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8409 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8410 spin_unlock_irqrestore(&phba->hbalock, iflags);
8411
8412 if (unlikely(!cmdiocbq)) {
8413 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8414 "0386 ELS complete with no corresponding "
8415 "cmdiocb: iotag (%d)\n",
8416 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8417 lpfc_sli_release_iocbq(phba, irspiocbq);
8418 return NULL;
8419 }
8420
8421 /* Fake the irspiocbq and copy necessary response information */
8422 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8423
8424 return irspiocbq;
8425}
8426
8427/**
8428 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
8429 * @phba: Pointer to HBA context object.
8430 * @mcqe: Pointer to mailbox completion queue entry.
8431 *
8432 * This routine processes a mailbox completion queue entry with an
8433 * asynchronous event.
8434 *
8435 * Return: true if work posted to worker thread, otherwise false.
8436 **/
8437static bool
8438lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8439{
8440 struct lpfc_cq_event *cq_event;
8441 unsigned long iflags;
8442
8443 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8444 "0392 Async Event: word0:x%x, word1:x%x, "
8445 "word2:x%x, word3:x%x\n", mcqe->word0,
8446 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
8447
8448 /* Allocate a new internal CQ_EVENT entry */
8449 cq_event = lpfc_sli4_cq_event_alloc(phba);
8450 if (!cq_event) {
8451 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8452 "0394 Failed to allocate CQ_EVENT entry\n");
8453 return false;
8454 }
8455
8456 /* Move the CQE into an asynchronous event entry */
8457 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
8458 spin_lock_irqsave(&phba->hbalock, iflags);
8459 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
8460 /* Set the async event flag */
8461 phba->hba_flag |= ASYNC_EVENT;
8462 spin_unlock_irqrestore(&phba->hbalock, iflags);
8463
8464 return true;
8465}
8466
8467/**
8468 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
8469 * @phba: Pointer to HBA context object.
8470 * @mcqe: Pointer to mailbox completion queue entry.
8471 *
8472 * This routine processes a mailbox completion queue entry with a mailbox
8473 * completion event.
8474 *
8475 * Return: true if work posted to worker thread, otherwise false.
8476 **/
8477static bool
8478lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8479{
8480 uint32_t mcqe_status;
8481 MAILBOX_t *mbox, *pmbox;
8482 struct lpfc_mqe *mqe;
8483 struct lpfc_vport *vport;
8484 struct lpfc_nodelist *ndlp;
8485 struct lpfc_dmabuf *mp;
8486 unsigned long iflags;
8487 LPFC_MBOXQ_t *pmb;
8488 bool workposted = false;
8489 int rc;
8490
8491 /* If not a mailbox-complete MCQE, bail out via the consumed-trailer check */
8492 if (!bf_get(lpfc_trailer_completed, mcqe))
8493 goto out_no_mqe_complete;
8494
8495 /* Get the reference to the active mbox command */
8496 spin_lock_irqsave(&phba->hbalock, iflags);
8497 pmb = phba->sli.mbox_active;
8498 if (unlikely(!pmb)) {
8499 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
8500 "1832 No pending MBOX command to handle\n");
8501 spin_unlock_irqrestore(&phba->hbalock, iflags);
8502 goto out_no_mqe_complete;
8503 }
8504 spin_unlock_irqrestore(&phba->hbalock, iflags);
8505 mqe = &pmb->u.mqe;
8506 pmbox = (MAILBOX_t *)&pmb->u.mqe;
8507 mbox = phba->mbox;
8508 vport = pmb->vport;
8509
8510 /* Reset heartbeat timer */
8511 phba->last_completion_time = jiffies;
8512 del_timer(&phba->sli.mbox_tmo);
8513
8514 /* Move mbox data to caller's mailbox region, do endian swapping */
8515 if (pmb->mbox_cmpl && mbox)
8516 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
8517 /* Set the mailbox status with SLI4 range 0x4000 */
8518 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
8519 if (mcqe_status != MB_CQE_STATUS_SUCCESS)
8520 bf_set(lpfc_mqe_status, mqe,
8521 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8522
8523 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8524 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8525 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
8526 "MBOX dflt rpi: status:x%x rpi:x%x",
8527 mcqe_status,
8528 pmbox->un.varWords[0], 0);
8529 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
8530 mp = (struct lpfc_dmabuf *)(pmb->context1);
8531 ndlp = (struct lpfc_nodelist *)pmb->context2;
8532 /* Reg_LOGIN of dflt RPI was successful. Now let's get
8533 * rid of the RPI using the same mbox buffer.
8534 */
8535 lpfc_unreg_login(phba, vport->vpi,
8536 pmbox->un.varWords[0], pmb);
8537 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
8538 pmb->context1 = mp;
8539 pmb->context2 = ndlp;
8540 pmb->vport = vport;
8541 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
8542 if (rc != MBX_BUSY)
8543 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8544 LOG_SLI, "0385 rc should "
8545 "have been MBX_BUSY\n");
8546 if (rc != MBX_NOT_FINISHED)
8547 goto send_current_mbox;
8548 }
8549 }
8550 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8551 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8552 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8553
8554 /* There is mailbox completion work to do */
8555 spin_lock_irqsave(&phba->hbalock, iflags);
8556 __lpfc_mbox_cmpl_put(phba, pmb);
8557 phba->work_ha |= HA_MBATT;
8558 spin_unlock_irqrestore(&phba->hbalock, iflags);
8559 workposted = true;
8560
8561send_current_mbox:
8562 spin_lock_irqsave(&phba->hbalock, iflags);
8563 /* Release the mailbox command posting token */
8564 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8565 /* Setting active mailbox pointer need to be in sync to flag clear */
8566 phba->sli.mbox_active = NULL;
8567 spin_unlock_irqrestore(&phba->hbalock, iflags);
8568 /* Wake up worker thread to post the next pending mailbox command */
8569 lpfc_worker_wake_up(phba);
8570out_no_mqe_complete:
8571 if (bf_get(lpfc_trailer_consumed, mcqe))
8572 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
8573 return workposted;
8574}
8575
8576/**
8577 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
8578 * @phba: Pointer to HBA context object.
8579 * @cqe: Pointer to mailbox completion queue entry.
8580 *
8581 * This routine processes a mailbox completion queue entry. It invokes the
8582 * proper mailbox completion handling or asynchronous event handling routine
8583 * according to the MCQE's async bit.
8584 *
8585 * Return: true if work posted to worker thread, otherwise false.
8586 **/
8587static bool
8588lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8589{
8590 struct lpfc_mcqe mcqe;
8591 bool workposted;
8592
8593 /* Copy the mailbox MCQE and convert endian order as needed */
8594 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
8595
8596 /* Invoke the proper event handling routine */
8597 if (!bf_get(lpfc_trailer_async, &mcqe))
8598 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
8599 else
8600 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
8601 return workposted;
8602}
8603
8604/**
8605 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
8606 * @phba: Pointer to HBA context object.
8607 * @wcqe: Pointer to work-queue completion queue entry.
8608 *
8609 * This routine handles an ELS work-queue completion event.
8610 *
8611 * Return: true if work posted to worker thread, otherwise false.
8612 **/
8613static bool
8614lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8615 struct lpfc_wcqe_complete *wcqe)
8616{
8617 struct lpfc_iocbq *irspiocbq;
8618 unsigned long iflags;
8619
8620 /* Get an irspiocbq for later ELS response processing use */
8621 irspiocbq = lpfc_sli_get_iocbq(phba);
8622 if (!irspiocbq) {
8623 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8624 "0387 Failed to allocate an iocbq\n");
8625 return false;
8626 }
8627
8628 /* Save off the slow-path queue event for the worker thread to process */
8629 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
8630 spin_lock_irqsave(&phba->hbalock, iflags);
8631 list_add_tail(&irspiocbq->cq_event.list,
8632 &phba->sli4_hba.sp_queue_event);
8633 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8634 spin_unlock_irqrestore(&phba->hbalock, iflags);
8635
8636 return true;
8637}
8638
8639/**
8640 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
8641 * @phba: Pointer to HBA context object.
8642 * @wcqe: Pointer to work-queue completion queue entry.
8643 *
8644 * This routine handles a slow-path WQ entry consumed event by invoking the
8645 * proper WQ release routine to the slow-path WQ.
8646 **/
8647static void
8648lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
8649 struct lpfc_wcqe_release *wcqe)
8650{
8651 /* Check for the slow-path ELS work queue */
8652 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
8653 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
8654 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8655 else
8656 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8657 "2579 Slow-path wqe consume event carries "
8658 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
8659 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
8660 phba->sli4_hba.els_wq->queue_id);
8661}
8662
8663/**
8664 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
8665 * @phba: Pointer to HBA context object.
8666 * @cq: Pointer to a WQ completion queue.
8667 * @wcqe: Pointer to work-queue completion queue entry.
8668 *
8669 * This routine handles an XRI abort event.
8670 *
8671 * Return: true if work posted to worker thread, otherwise false.
8672 **/
8673static bool
8674lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8675 struct lpfc_queue *cq,
8676 struct sli4_wcqe_xri_aborted *wcqe)
8677{
8678 bool workposted = false;
8679 struct lpfc_cq_event *cq_event;
8680 unsigned long iflags;
8681
8682 /* Allocate a new internal CQ_EVENT entry */
8683 cq_event = lpfc_sli4_cq_event_alloc(phba);
8684 if (!cq_event) {
8685 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8686 "0602 Failed to allocate CQ_EVENT entry\n");
8687 return false;
8688 }
8689
8690 /* Move the CQE into the proper xri abort event list */
8691 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
8692 switch (cq->subtype) {
8693 case LPFC_FCP:
8694 spin_lock_irqsave(&phba->hbalock, iflags);
8695 list_add_tail(&cq_event->list,
8696 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
8697 /* Set the fcp xri abort event flag */
8698 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
8699 spin_unlock_irqrestore(&phba->hbalock, iflags);
8700 workposted = true;
8701 break;
8702 case LPFC_ELS:
8703 spin_lock_irqsave(&phba->hbalock, iflags);
8704 list_add_tail(&cq_event->list,
8705 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
8706 /* Set the els xri abort event flag */
8707 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
8708 spin_unlock_irqrestore(&phba->hbalock, iflags);
8709 workposted = true;
8710 break;
8711 default:
8712 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8713 "0603 Invalid work queue CQE subtype (x%x)\n",
8714 cq->subtype);
8715 workposted = false;
8716 break;
8717 }
8718 return workposted;
8719}
8720
8721/**
8722 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8723 * @phba: Pointer to HBA context object.
8724 * @rcqe: Pointer to receive-queue completion queue entry.
8725 *
8726 * This routine processes a receive-queue completion queue entry.
8727 *
8728 * Return: true if work posted to worker thread, otherwise false.
8729 **/
8730static bool
8731lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
8732{
8733 bool workposted = false;
8734 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8735 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
8736 struct hbq_dmabuf *dma_buf;
8737 uint32_t status;
8738 unsigned long iflags;
8739
8740 if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
8741 goto out;
8742
8743 status = bf_get(lpfc_rcqe_status, rcqe);
8744 switch (status) {
8745 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8746 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8747 "2537 Receive Frame Truncated!!\n");
8748 case FC_STATUS_RQ_SUCCESS:
5ffc266e 8749 lpfc_sli4_rq_release(hrq, drq);
8750 spin_lock_irqsave(&phba->hbalock, iflags);
8751 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8752 if (!dma_buf) {
8753 spin_unlock_irqrestore(&phba->hbalock, iflags);
8754 goto out;
8755 }
8756 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
8757 /* save off the frame for the worker thread to process */
8758 list_add_tail(&dma_buf->cq_event.list,
8759 &phba->sli4_hba.sp_queue_event);
8760 /* Frame received */
8761 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8762 spin_unlock_irqrestore(&phba->hbalock, iflags);
8763 workposted = true;
8764 break;
8765 case FC_STATUS_INSUFF_BUF_NEED_BUF:
8766 case FC_STATUS_INSUFF_BUF_FRM_DISC:
8767 /* Post more buffers if possible */
8768 spin_lock_irqsave(&phba->hbalock, iflags);
8769 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
8770 spin_unlock_irqrestore(&phba->hbalock, iflags);
8771 workposted = true;
8772 break;
8773 }
8774out:
8775 return workposted;
8776}
8777
8778/**
8779 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
8780 * @phba: Pointer to HBA context object.
8781 * @cq: Pointer to the completion queue.
8782 * @cqe: Pointer to a completion queue entry.
8783 *
8784 * This routine processes a slow-path work-queue or receive-queue completion
8785 * queue entry.
8786 *
8787 * Return: true if work posted to worker thread, otherwise false.
8788 **/
8789static bool
8790lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8791 struct lpfc_cqe *cqe)
8792{
8793 struct lpfc_cqe cqevt;
8794 bool workposted = false;
8795
8796 /* Copy the work queue CQE and convert endian order if needed */
8797 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
8798
8799 /* Check and process for different type of WCQE and dispatch */
8800 switch (bf_get(lpfc_cqe_code, &cqevt)) {
8801 case CQE_CODE_COMPL_WQE:
8802 /* Process the WQ/RQ complete event */
8803 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8804 (struct lpfc_wcqe_complete *)&cqevt);
8805 break;
8806 case CQE_CODE_RELEASE_WQE:
8807 /* Process the WQ release event */
8808 lpfc_sli4_sp_handle_rel_wcqe(phba,
8809 (struct lpfc_wcqe_release *)&cqevt);
8810 break;
8811 case CQE_CODE_XRI_ABORTED:
8812 /* Process the WQ XRI abort event */
8813 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8814 (struct sli4_wcqe_xri_aborted *)&cqevt);
8815 break;
8816 case CQE_CODE_RECEIVE:
8817 /* Process the RQ event */
8818 workposted = lpfc_sli4_sp_handle_rcqe(phba,
8819 (struct lpfc_rcqe *)&cqevt);
8820 break;
8821 default:
8822 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8823 "0388 Not a valid WCQE code: x%x\n",
8824 bf_get(lpfc_cqe_code, &cqevt));
8825 break;
8826 }
8827 return workposted;
8828}
8829
8830/**
8831 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
8832 * @phba: Pointer to HBA context object.
8833 * @eqe: Pointer to fast-path event queue entry.
8834 *
8835 * This routine processes an event queue entry from the slow-path event queue.
8836 * It checks the MajorCode and MinorCode to determine whether this is a
8837 * completion event on a completion queue; if not, an error is logged and the
8838 * routine returns. Otherwise, it gets the corresponding completion
8839 * queue, processes all the entries on that completion queue, rearms the
8840 * completion queue, and then returns.
8841 *
8842 **/
8843static void
8844lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8845{
8846 struct lpfc_queue *cq = NULL, *childq, *speq;
8847 struct lpfc_cqe *cqe;
8848 bool workposted = false;
8849 int ecount = 0;
8850 uint16_t cqid;
8851
8852 if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
8853 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8854 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8855 "0359 Not a valid slow-path completion "
8856 "event: majorcode=x%x, minorcode=x%x\n",
8857 bf_get(lpfc_eqe_major_code, eqe),
8858 bf_get(lpfc_eqe_minor_code, eqe));
8859 return;
8860 }
8861
8862 /* Get the reference to the corresponding CQ */
8863 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8864
8865 /* Search for completion queue pointer matching this cqid */
8866 speq = phba->sli4_hba.sp_eq;
8867 list_for_each_entry(childq, &speq->child_list, list) {
8868 if (childq->queue_id == cqid) {
8869 cq = childq;
8870 break;
8871 }
8872 }
8873 if (unlikely(!cq)) {
8874 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8875 "0365 Slow-path CQ identifier (%d) does "
8876 "not exist\n", cqid);
8877 return;
8878 }
8879
8880 /* Process all the entries to the CQ */
8881 switch (cq->type) {
8882 case LPFC_MCQ:
8883 while ((cqe = lpfc_sli4_cq_get(cq))) {
8884 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
8885 if (!(++ecount % LPFC_GET_QE_REL_INT))
8886 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8887 }
8888 break;
8889 case LPFC_WCQ:
8890 while ((cqe = lpfc_sli4_cq_get(cq))) {
8891 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
8892 if (!(++ecount % LPFC_GET_QE_REL_INT))
8893 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8894 }
8895 break;
8896 default:
8897 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8898 "0370 Invalid completion queue type (%d)\n",
8899 cq->type);
8900 return;
8901 }
8902
8903 /* Catch the no cq entry condition, log an error */
8904 if (unlikely(ecount == 0))
8905 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8906 "0371 No entry from the CQ: identifier "
8907 "(x%x), type (%d)\n", cq->queue_id, cq->type);
8908
8909 /* In any case, flush and re-arm the CQ */
8910 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8911
8912 /* wake up worker thread if there is work to be done */
8913 if (workposted)
8914 lpfc_worker_wake_up(phba);
8915}
8916
8917/**
8918 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
8919 * @phba: Pointer to HBA context object.
8920 * @wcqe: Pointer to work-queue completion queue entry.
8921 *
8922 * This routine processes a fast-path work-queue completion entry from a
 * fast-path event queue for an FCP command response completion.
8923 **/
8924static void
8925lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8926 struct lpfc_wcqe_complete *wcqe)
8927{
8928 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8929 struct lpfc_iocbq *cmdiocbq;
8930 struct lpfc_iocbq irspiocbq;
8931 unsigned long iflags;
8932
8933 spin_lock_irqsave(&phba->hbalock, iflags);
8934 pring->stats.iocb_event++;
8935 spin_unlock_irqrestore(&phba->hbalock, iflags);
8936
8937 /* Check for response status */
8938 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
8939 /* If resource errors reported from HBA, reduce queue
8940 * depth of the SCSI device.
8941 */
8942 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
8943 IOSTAT_LOCAL_REJECT) &&
8944 (wcqe->parameter == IOERR_NO_RESOURCES)) {
8945 phba->lpfc_rampdown_queue_depth(phba);
8946 }
8947 /* Log the error status */
8948 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8949 "0373 FCP complete error: status=x%x, "
8950 "hw_status=x%x, total_data_specified=%d, "
8951 "parameter=x%x, word3=x%x\n",
8952 bf_get(lpfc_wcqe_c_status, wcqe),
8953 bf_get(lpfc_wcqe_c_hw_status, wcqe),
8954 wcqe->total_data_placed, wcqe->parameter,
8955 wcqe->word3);
8956 }
8957
8958 /* Look up the FCP command IOCB and create pseudo response IOCB */
8959 spin_lock_irqsave(&phba->hbalock, iflags);
8960 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8961 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8962 spin_unlock_irqrestore(&phba->hbalock, iflags);
8963 if (unlikely(!cmdiocbq)) {
8964 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8965 "0374 FCP complete with no corresponding "
8966 "cmdiocb: iotag (%d)\n",
8967 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8968 return;
8969 }
8970 if (unlikely(!cmdiocbq->iocb_cmpl)) {
8971 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8972 "0375 FCP cmdiocb not callback function "
8973 "iotag: (%d)\n",
8974 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8975 return;
8976 }
8977
8978 /* Fake the irspiocb and copy necessary response information */
8979 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
8980
8981 /* Pass the cmd_iocb and the rsp state to the upper layer */
8982 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
8983}
8984
8985/**
8986 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
8987 * @phba: Pointer to HBA context object.
8988 * @cq: Pointer to completion queue.
8989 * @wcqe: Pointer to work-queue completion queue entry.
8990 *
8991 * This routine handles a fast-path WQ entry consumed event by invoking the
8992 * proper WQ release routine on the matching fast-path WQ.
8993 **/
8994static void
8995lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8996 struct lpfc_wcqe_release *wcqe)
8997{
8998 struct lpfc_queue *childwq;
8999 bool wqid_matched = false;
9000 uint16_t fcp_wqid;
9001
9002 /* Check for fast-path FCP work queue release */
9003 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
9004 list_for_each_entry(childwq, &cq->child_list, list) {
9005 if (childwq->queue_id == fcp_wqid) {
9006 lpfc_sli4_wq_release(childwq,
9007 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
9008 wqid_matched = true;
9009 break;
9010 }
9011 }
9012 /* Report warning log message if no match found */
9013 if (!wqid_matched)
9014 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9015 "2580 Fast-path wqe consume event carries "
9016 "mismatched qid: wcqe-qid=x%x\n", fcp_wqid);
9017}
9018
9019/**
9020 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
9021 * @cq: Pointer to the completion queue.
9022 * @cqe: Pointer to fast-path completion queue entry.
9023 *
9024 * This routine processes a fast-path work-queue completion entry from a
9025 * fast-path event queue for an FCP command response completion.
9026 **/
9027static int
9028lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
9029 struct lpfc_cqe *cqe)
9030{
9031 struct lpfc_wcqe_release wcqe;
9032 bool workposted = false;
9033
9034 /* Copy the work queue CQE and convert endian order if needed */
9035 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
9036
9037 /* Check and process for different type of WCQE and dispatch */
9038 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
9039 case CQE_CODE_COMPL_WQE:
9040 /* Process the WQ complete event */
9041 lpfc_sli4_fp_handle_fcp_wcqe(phba,
9042 (struct lpfc_wcqe_complete *)&wcqe);
9043 break;
9044 case CQE_CODE_RELEASE_WQE:
9045 /* Process the WQ release event */
9046 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
9047 (struct lpfc_wcqe_release *)&wcqe);
9048 break;
9049 case CQE_CODE_XRI_ABORTED:
9050 /* Process the WQ XRI abort event */
9051 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
9052 (struct sli4_wcqe_xri_aborted *)&wcqe);
9053 break;
9054 default:
9055 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9056 "0144 Not a valid WCQE code: x%x\n",
9057 bf_get(lpfc_wcqe_c_code, &wcqe));
9058 break;
9059 }
9060 return workposted;
9061}
9062
9063/**
9064 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
9065 * @phba: Pointer to HBA context object.
9066 * @eqe: Pointer to fast-path event queue entry.
9067 *
9068 * This routine processes an event queue entry from the fast-path event queue.
9069 * It checks the MajorCode and MinorCode to determine whether this is a
9070 * completion event on a completion queue; if not, an error is logged and the
9071 * routine returns. Otherwise, it gets the corresponding completion
9072 * queue, processes all the entries on the completion queue, rearms the
9073 * completion queue, and then returns.
9074 **/
9075static void
9076lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9077 uint32_t fcp_cqidx)
9078{
9079 struct lpfc_queue *cq;
9080 struct lpfc_cqe *cqe;
9081 bool workposted = false;
9082 uint16_t cqid;
9083 int ecount = 0;
9084
9085 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
9086 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
9087 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9088 "0366 Not a valid fast-path completion "
9089 "event: majorcode=x%x, minorcode=x%x\n",
9090 bf_get(lpfc_eqe_major_code, eqe),
9091 bf_get(lpfc_eqe_minor_code, eqe));
9092 return;
9093 }
9094
9095 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
9096 if (unlikely(!cq)) {
9097 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9098 "0367 Fast-path completion queue does not "
9099 "exist\n");
9100 return;
9101 }
9102
9103 /* Get the reference to the corresponding CQ */
9104 cqid = bf_get(lpfc_eqe_resource_id, eqe);
9105 if (unlikely(cqid != cq->queue_id)) {
9106 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9107 "0368 Miss-matched fast-path completion "
9108 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
9109 cqid, cq->queue_id);
9110 return;
9111 }
9112
9113 /* Process all the entries to the CQ */
9114 while ((cqe = lpfc_sli4_cq_get(cq))) {
9115 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
9116 if (!(++ecount % LPFC_GET_QE_REL_INT))
9117 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
9118 }
9119
9120 /* Catch the no cq entry condition */
9121 if (unlikely(ecount == 0))
9122 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9123 "0369 No entry from fast-path completion "
9124 "queue fcpcqid=%d\n", cq->queue_id);
9125
9126 /* In any case, flush and re-arm the CQ */
9127 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
9128
9129 /* wake up worker thread if there is work to be done */
9130 if (workposted)
9131 lpfc_worker_wake_up(phba);
9132}
9133
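/**
 * lpfc_sli4_eq_flush - Drop all pending entries on an event queue
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue to flush.
 *
 * This routine walks the event queue and discards every pending entry,
 * then clears and re-arms the queue. It is used when the link is not in
 * a state in which the pending events can be processed.
 **/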
9134static void
9135lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
9136{
9137 struct lpfc_eqe *eqe;
9138
9139 /* walk all the EQ entries and drop on the floor */
9140 while ((eqe = lpfc_sli4_eq_get(eq)))
9141 ;
9142
9143 /* Clear and re-arm the EQ */
9144 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
9145}
9146
9147/**
9148 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
9149 * @irq: Interrupt number.
9150 * @dev_id: The device context pointer.
9151 *
9152 * This function is directly called from the PCI layer as an interrupt
9153 * service routine when device with SLI-4 interface spec is enabled with
9154 * MSI-X multi-message interrupt mode and there are slow-path events in
9155 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
9156 * interrupt mode, this function is called as part of the device-level
9157 * interrupt handler. When the PCI slot is in error recovery or the HBA is
9158 * undergoing initialization, the interrupt handler will not process the
9159 * interrupt. The link attention and ELS ring attention events are handled
9160 * by the worker thread. The interrupt handler signals the worker thread
9161 * and returns for these events. This function is called without any lock
9162 * held. It gets the hbalock to access and update SLI data structures.
9163 *
9164 * This function returns IRQ_HANDLED when interrupt is handled else it
9165 * returns IRQ_NONE.
9166 **/
9167irqreturn_t
9168lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
9169{
9170 struct lpfc_hba *phba;
9171 struct lpfc_queue *speq;
9172 struct lpfc_eqe *eqe;
9173 unsigned long iflag;
9174 int ecount = 0;
9175
9176 /*
9177 * Get the driver's phba structure from the dev_id
9178 */
9179 phba = (struct lpfc_hba *)dev_id;
9180
9181 if (unlikely(!phba))
9182 return IRQ_NONE;
9183
9184 /* Get to the EQ struct associated with this vector */
9185 speq = phba->sli4_hba.sp_eq;
9186
9187 /* Check device state for handling interrupt */
9188 if (unlikely(lpfc_intr_state_check(phba))) {
9189 /* Check again for link_state with lock held */
9190 spin_lock_irqsave(&phba->hbalock, iflag);
9191 if (phba->link_state < LPFC_LINK_DOWN)
9192 /* Flush, clear interrupt, and rearm the EQ */
9193 lpfc_sli4_eq_flush(phba, speq);
9194 spin_unlock_irqrestore(&phba->hbalock, iflag);
9195 return IRQ_NONE;
9196 }
9197
9198 /*
9199 * Process all the events on the slow-path EQ
9200 */
9201 while ((eqe = lpfc_sli4_eq_get(speq))) {
9202 lpfc_sli4_sp_handle_eqe(phba, eqe);
9203 if (!(++ecount % LPFC_GET_QE_REL_INT))
9204 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
9205 }
9206
9207 /* Always clear and re-arm the slow-path EQ */
9208 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
9209
9210 /* Catch the no cq entry condition */
9211 if (unlikely(ecount == 0)) {
9212 if (phba->intr_type == MSIX)
9213 /* MSI-X treated interrupt served as no EQ share INT */
9214 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9215 "0357 MSI-X interrupt with no EQE\n");
9216 else
9217 /* Non MSI-X treated on interrupt as EQ share INT */
9218 return IRQ_NONE;
9219 }
9220
9221 return IRQ_HANDLED;
9222} /* lpfc_sli4_sp_intr_handler */
9223
9224/**
9225 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
9226 * @irq: Interrupt number.
9227 * @dev_id: The device context pointer.
9228 *
9229 * This function is directly called from the PCI layer as an interrupt
9230 * service routine when device with SLI-4 interface spec is enabled with
9231 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
9232 * ring event in the HBA. However, when the device is enabled with either
9233 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
9234 * device-level interrupt handler. When the PCI slot is in error recovery
9235 * or the HBA is undergoing initialization, the interrupt handler will not
9236 * process the interrupt. The SCSI FCP fast-path ring events are handled in
9237 * the interrupt context. This function is called without any lock held.
9238 * It gets the hbalock to access and update SLI data structures. Note that
9239 * the FCP EQs map one-to-one onto the FCP CQs, so the FCP EQ index is
9240 * equal to the corresponding FCP CQ index.
9241 *
9242 * This function returns IRQ_HANDLED when interrupt is handled else it
9243 * returns IRQ_NONE.
9244 **/
9245irqreturn_t
9246lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
9247{
9248 struct lpfc_hba *phba;
9249 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9250 struct lpfc_queue *fpeq;
9251 struct lpfc_eqe *eqe;
9252 unsigned long iflag;
9253 int ecount = 0;
9254 uint32_t fcp_eqidx;
9255
9256 /* Get the driver's phba structure from the dev_id */
9257 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
9258 phba = fcp_eq_hdl->phba;
9259 fcp_eqidx = fcp_eq_hdl->idx;
9260
9261 if (unlikely(!phba))
9262 return IRQ_NONE;
9263
9264 /* Get to the EQ struct associated with this vector */
9265 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
9266
9267 /* Check device state for handling interrupt */
9268 if (unlikely(lpfc_intr_state_check(phba))) {
9269 /* Check again for link_state with lock held */
9270 spin_lock_irqsave(&phba->hbalock, iflag);
9271 if (phba->link_state < LPFC_LINK_DOWN)
9272 /* Flush, clear interrupt, and rearm the EQ */
9273 lpfc_sli4_eq_flush(phba, fpeq);
9274 spin_unlock_irqrestore(&phba->hbalock, iflag);
9275 return IRQ_NONE;
9276 }
9277
9278 /*
9279 * Process all the events on the FCP fast-path EQ
9280 */
9281 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9282 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
9283 if (!(++ecount % LPFC_GET_QE_REL_INT))
9284 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
9285 }
9286
9287 /* Always clear and re-arm the fast-path EQ */
9288 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
9289
9290 if (unlikely(ecount == 0)) {
9291 if (phba->intr_type == MSIX)
9292 /* MSI-X treated interrupt served as no EQ share INT */
9293 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9294 "0358 MSI-X interrupt with no EQE\n");
9295 else
9296 /* Non MSI-X treated on interrupt as EQ share INT */
9297 return IRQ_NONE;
9298 }
9299
9300 return IRQ_HANDLED;
9301} /* lpfc_sli4_fp_intr_handler */
9302
9303/**
9304 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
9305 * @irq: Interrupt number.
9306 * @dev_id: The device context pointer.
9307 *
9308 * This function is the device-level interrupt handler to device with SLI-4
9309 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
9310 * interrupt mode is enabled and there is an event in the HBA which requires
9311 * driver attention. This function invokes the slow-path interrupt attention
9312 * handling function and fast-path interrupt attention handling function in
9313 * turn to process the relevant HBA attention events. This function is called
9314 * without any lock held. It gets the hbalock to access and update SLI data
9315 * structures.
9316 *
9317 * This function returns IRQ_HANDLED when interrupt is handled, else it
9318 * returns IRQ_NONE.
9319 **/
9320irqreturn_t
9321lpfc_sli4_intr_handler(int irq, void *dev_id)
9322{
9323 struct lpfc_hba *phba;
9324 irqreturn_t sp_irq_rc, fp_irq_rc;
9325 bool fp_handled = false;
9326 uint32_t fcp_eqidx;
9327
9328 /* Get the driver's phba structure from the dev_id */
9329 phba = (struct lpfc_hba *)dev_id;
9330
9331 if (unlikely(!phba))
9332 return IRQ_NONE;
9333
9334 /*
9335 * Invokes slow-path host attention interrupt handling as appropriate.
9336 */
9337 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
9338
9339 /*
9340 * Invoke fast-path host attention interrupt handling as appropriate.
9341 */
9342 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
9343 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
9344 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
9345 if (fp_irq_rc == IRQ_HANDLED)
9346 fp_handled = true;
9347 }
9348
9349 return fp_handled ? IRQ_HANDLED : sp_irq_rc;
9350} /* lpfc_sli4_intr_handler */
9351
9352/**
9353 * lpfc_sli4_queue_free - free a queue structure and associated memory
9354 * @queue: The queue structure to free.
9355 *
9356 * This function frees a queue structure and the DMAable memory used for
9357 * the host resident queue. This function must be called after destroying the
9358 * queue on the HBA.
9359 **/
9360void
9361lpfc_sli4_queue_free(struct lpfc_queue *queue)
9362{
9363 struct lpfc_dmabuf *dmabuf;
9364
9365 if (!queue)
9366 return;
9367
9368 while (!list_empty(&queue->page_list)) {
9369 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
9370 list);
9371 dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
9372 dmabuf->virt, dmabuf->phys);
9373 kfree(dmabuf);
9374 }
9375 kfree(queue);
9376 return;
9377}
9378
9379/**
9380 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
9381 * @phba: The HBA that this queue is being created on.
9382 * @entry_size: The size of each queue entry for this queue.
9383 * @entry_count: The number of entries that this queue will handle.
9384 *
9385 * This function allocates a queue structure and the DMAable memory used for
9386 * the host resident queue. This function must be called before creating the
9387 * queue on the HBA.
9388 **/
9389struct lpfc_queue *
9390lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9391 uint32_t entry_count)
9392{
9393 struct lpfc_queue *queue;
9394 struct lpfc_dmabuf *dmabuf;
9395 int x, total_qe_count;
9396 void *dma_pointer;
9397
9398
9399 queue = kzalloc(sizeof(struct lpfc_queue) +
9400 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
9401 if (!queue)
9402 return NULL;
9403 queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
9404 INIT_LIST_HEAD(&queue->list);
9405 INIT_LIST_HEAD(&queue->page_list);
9406 INIT_LIST_HEAD(&queue->child_list);
9407 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
9408 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9409 if (!dmabuf)
9410 goto out_fail;
9411 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9412 PAGE_SIZE, &dmabuf->phys,
9413 GFP_KERNEL);
9414 if (!dmabuf->virt) {
9415 kfree(dmabuf);
9416 goto out_fail;
9417 }
9418		memset(dmabuf->virt, 0, PAGE_SIZE);
9419 dmabuf->buffer_tag = x;
9420 list_add_tail(&dmabuf->list, &queue->page_list);
9421 /* initialize queue's entry array */
9422 dma_pointer = dmabuf->virt;
9423 for (; total_qe_count < entry_count &&
9424 dma_pointer < (PAGE_SIZE + dmabuf->virt);
9425 total_qe_count++, dma_pointer += entry_size) {
9426 queue->qe[total_qe_count].address = dma_pointer;
9427 }
9428 }
9429 queue->entry_size = entry_size;
9430 queue->entry_count = entry_count;
9431 queue->phba = phba;
9432
9433 return queue;
9434out_fail:
9435 lpfc_sli4_queue_free(queue);
9436 return NULL;
9437}
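
To make the alloc/create pairing concrete, a hedged lifecycle sketch (the
helper is hypothetical; the entry size and count are illustrative): the
host-side structure from lpfc_sli4_queue_alloc() must exist before the
create mailbox command is sent, and teardown runs in the reverse order.

/* Sketch only; entry size/count are illustrative values. */
static struct lpfc_queue *lpfc_eq_setup_sketch(struct lpfc_hba *phba,
					       uint16_t imax)
{
	struct lpfc_queue *eq;

	eq = lpfc_sli4_queue_alloc(phba, sizeof(struct lpfc_eqe), 1024);
	if (!eq)
		return NULL;
	if (lpfc_eq_create(phba, eq, imax)) {
		/* the HBA never saw this queue; host memory only */
		lpfc_sli4_queue_free(eq);
		return NULL;
	}
	return eq;	/* teardown: lpfc_eq_destroy(), then queue_free */
}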
9438
9439/**
9440 * lpfc_eq_create - Create an Event Queue on the HBA
9441 * @phba: HBA structure that indicates port to create a queue on.
9442 * @eq: The queue structure to use to create the event queue.
9443 * @imax: The maximum interrupts-per-second limit.
9444 *
9445 * This function creates an event queue, as detailed in @eq, on a port,
9446 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
9447 *
9448 * The @phba struct is used to send a mailbox command to the HBA. The @eq
9449 * struct is used to get the entry count and entry size that are necessary to
9450 * determine the number of pages to allocate and use for this queue. This
9451 * function will send the EQ_CREATE mailbox command to the HBA to set up the
9452 * event queue. This function is synchronous: it waits for the mailbox
9453 * command to finish before continuing.
9454 *
9455 * On success this function will return zero. If unable to allocate enough
9456 * memory this function will return -ENOMEM. If the queue create mailbox
9457 * command fails this function will return -ENXIO.
9458 **/
9459uint32_t
9460lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9461{
9462 struct lpfc_mbx_eq_create *eq_create;
9463 LPFC_MBOXQ_t *mbox;
9464 int rc, length, status = 0;
9465 struct lpfc_dmabuf *dmabuf;
9466 uint32_t shdr_status, shdr_add_status;
9467 union lpfc_sli4_cfg_shdr *shdr;
9468 uint16_t dmult;
9469
9470 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9471 if (!mbox)
9472 return -ENOMEM;
9473 length = (sizeof(struct lpfc_mbx_eq_create) -
9474 sizeof(struct lpfc_sli4_cfg_mhdr));
9475 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9476 LPFC_MBOX_OPCODE_EQ_CREATE,
9477 length, LPFC_SLI4_MBX_EMBED);
9478 eq_create = &mbox->u.mqe.un.eq_create;
9479 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
9480 eq->page_count);
9481 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
9482 LPFC_EQE_SIZE);
9483 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
9484	/* Calculate delay multiplier from maximum interrupts per second */
9485 dmult = LPFC_DMULT_CONST/imax - 1;
9486 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
9487 dmult);
9488 switch (eq->entry_count) {
9489 default:
9490 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9491 "0360 Unsupported EQ count. (%d)\n",
9492 eq->entry_count);
9493		if (eq->entry_count < 256) {
9494			mempool_free(mbox, phba->mbox_mem_pool);
			return -EINVAL;
		}
9495 /* otherwise default to smallest count (drop through) */
9496 case 256:
9497 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9498 LPFC_EQ_CNT_256);
9499 break;
9500 case 512:
9501 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9502 LPFC_EQ_CNT_512);
9503 break;
9504 case 1024:
9505 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9506 LPFC_EQ_CNT_1024);
9507 break;
9508 case 2048:
9509 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9510 LPFC_EQ_CNT_2048);
9511 break;
9512 case 4096:
9513 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9514 LPFC_EQ_CNT_4096);
9515 break;
9516 }
9517 list_for_each_entry(dmabuf, &eq->page_list, list) {
9518 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9519 putPaddrLow(dmabuf->phys);
9520 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9521 putPaddrHigh(dmabuf->phys);
9522 }
9523 mbox->vport = phba->pport;
9524 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9525 mbox->context1 = NULL;
9526 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9527 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
9528 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9529 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9530 if (shdr_status || shdr_add_status || rc) {
9531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9532 "2500 EQ_CREATE mailbox failed with "
9533 "status x%x add_status x%x, mbx status x%x\n",
9534 shdr_status, shdr_add_status, rc);
9535 status = -ENXIO;
9536 }
9537 eq->type = LPFC_EQ;
9538 eq->subtype = LPFC_NONE;
9539 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
9540 if (eq->queue_id == 0xFFFF)
9541 status = -ENXIO;
9542 eq->host_index = 0;
9543 eq->hba_index = 0;
9544
9545	mempool_free(mbox, phba->mbox_mem_pool);
9546 return status;
9547}
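
A worked example of the delay-multiplier math above, assuming
LPFC_DMULT_CONST is 651042 (its value in this era's lpfc_hw4.h): for
imax = 10000 interrupts per second, dmult = 651042 / 10000 - 1 = 64 with
integer division. Note the expression divides by imax, so callers must
pass a non-zero value; the function itself does not guard imax == 0.

/* Worked example (LPFC_DMULT_CONST assumed to be 651042): */
uint16_t dmult_example = 651042 / 10000 - 1;	/* == 64 for imax 10000 */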
9548
9549/**
9550 * lpfc_cq_create - Create a Completion Queue on the HBA
9551 * @phba: HBA structure that indicates port to create a queue on.
9552 * @cq: The queue structure to use to create the completion queue.
9553 * @eq: The event queue to bind this completion queue to.
 * @type: The type of the completion queue.
 * @subtype: The functional subtype of the queue.
9554 *
9555 * This function creates a completion queue, as detailed in @cq, on a port,
9556 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
9557 *
9558 * The @phba struct is used to send a mailbox command to the HBA. The @cq
9559 * struct is used to get the entry count and entry size that are necessary to
9560 * determine the number of pages to allocate and use for this queue. The @eq
9561 * is used to indicate which event queue to bind this completion queue to. This
9562 * function will send the CQ_CREATE mailbox command to the HBA to set up the
9563 * completion queue. This function is synchronous: it waits for the mailbox
9564 * command to finish before continuing.
9565 *
9566 * On success this function will return zero. If unable to allocate enough
9567 * memory this function will return -ENOMEM. If the queue create mailbox
9568 * command fails this function will return -ENXIO.
9569 **/
9570uint32_t
9571lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9572 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
9573{
9574 struct lpfc_mbx_cq_create *cq_create;
9575 struct lpfc_dmabuf *dmabuf;
9576 LPFC_MBOXQ_t *mbox;
9577 int rc, length, status = 0;
9578 uint32_t shdr_status, shdr_add_status;
9579 union lpfc_sli4_cfg_shdr *shdr;
9580
9581 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9582 if (!mbox)
9583 return -ENOMEM;
9584 length = (sizeof(struct lpfc_mbx_cq_create) -
9585 sizeof(struct lpfc_sli4_cfg_mhdr));
9586 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9587 LPFC_MBOX_OPCODE_CQ_CREATE,
9588 length, LPFC_SLI4_MBX_EMBED);
9589 cq_create = &mbox->u.mqe.un.cq_create;
9590 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
9591 cq->page_count);
9592 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
9593 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
9594 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
9595 switch (cq->entry_count) {
9596 default:
9597 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9598 "0361 Unsupported CQ count. (%d)\n",
9599 cq->entry_count);
9600		if (cq->entry_count < 256) {
9601			status = -EINVAL;
			goto out;	/* frees the mailbox */
		}
9602 /* otherwise default to smallest count (drop through) */
9603 case 256:
9604 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9605 LPFC_CQ_CNT_256);
9606 break;
9607 case 512:
9608 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9609 LPFC_CQ_CNT_512);
9610 break;
9611 case 1024:
9612 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9613 LPFC_CQ_CNT_1024);
9614 break;
9615 }
9616 list_for_each_entry(dmabuf, &cq->page_list, list) {
9617 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9618 putPaddrLow(dmabuf->phys);
9619 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9620 putPaddrHigh(dmabuf->phys);
9621 }
9622 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9623
9624 /* The IOCTL status is embedded in the mailbox subheader. */
9625 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
9626 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9627 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9628 if (shdr_status || shdr_add_status || rc) {
9629 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9630 "2501 CQ_CREATE mailbox failed with "
9631 "status x%x add_status x%x, mbx status x%x\n",
9632 shdr_status, shdr_add_status, rc);
9633 status = -ENXIO;
9634 goto out;
9635 }
9636 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9637 if (cq->queue_id == 0xFFFF) {
9638 status = -ENXIO;
9639 goto out;
9640 }
9641 /* link the cq onto the parent eq child list */
9642 list_add_tail(&cq->list, &eq->child_list);
9643 /* Set up completion queue's type and subtype */
9644 cq->type = type;
9645 cq->subtype = subtype;
9647 cq->host_index = 0;
9648 cq->hba_index = 0;
9649
9650out:
9651 mempool_free(mbox, phba->mbox_mem_pool);
9652 return status;
9653}
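
The create routines build a parent/child hierarchy through the child_list
links seen above: each CQ hangs off its EQ, and each WQ (below) hangs off
its CQ. A hedged ordering sketch, assuming the LPFC_WCQ and LPFC_FCP
constants from lpfc_sli4.h and eliding error unwinding:

/* Sketch: parents are created first (the EQ already exists here). */
static int lpfc_fcp_chain_sketch(struct lpfc_hba *phba,
				 struct lpfc_queue *eq,
				 struct lpfc_queue *cq,
				 struct lpfc_queue *wq)
{
	int rc;

	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
	if (rc)
		return rc;
	return lpfc_wq_create(phba, wq, cq, LPFC_FCP);
}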
9654
9655/**
9656 * lpfc_mq_create - Create a Mailbox Queue on the HBA
9657 * @phba: HBA structure that indicates port to create a queue on.
9658 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to bind this mailbox queue to.
 * @subtype: The functional subtype of the queue.
9659 *
9660 * This function creates a mailbox queue, as detailed in @mq, on a port,
9661 * described by @phba by sending an MQ_CREATE mailbox command to the HBA.
9662 *
9663 * The @phba struct is used to send a mailbox command to the HBA. The @mq
9664 * struct is used to get the entry count and entry size that are necessary to
9665 * determine the number of pages to allocate and use for this queue. This
9666 * function will send the MQ_CREATE mailbox command to the HBA to set up the
9667 * mailbox queue. This function is synchronous: it waits for the mailbox
9668 * command to finish before continuing.
9669 *
9670 * On success this function will return zero. If unable to allocate enough
9671 * memory this function will return -ENOMEM. If the queue create mailbox
9672 * command fails this function will return -ENXIO.
9673 **/
9674uint32_t
9675lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9676 struct lpfc_queue *cq, uint32_t subtype)
9677{
9678 struct lpfc_mbx_mq_create *mq_create;
9679 struct lpfc_dmabuf *dmabuf;
9680 LPFC_MBOXQ_t *mbox;
9681 int rc, length, status = 0;
9682 uint32_t shdr_status, shdr_add_status;
9683 union lpfc_sli4_cfg_shdr *shdr;
9684
9685 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9686 if (!mbox)
9687 return -ENOMEM;
9688 length = (sizeof(struct lpfc_mbx_mq_create) -
9689 sizeof(struct lpfc_sli4_cfg_mhdr));
9690 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9691 LPFC_MBOX_OPCODE_MQ_CREATE,
9692 length, LPFC_SLI4_MBX_EMBED);
9693 mq_create = &mbox->u.mqe.un.mq_create;
9694 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
9695 mq->page_count);
9696 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
9697 cq->queue_id);
9698 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
9699 switch (mq->entry_count) {
9700 default:
9701 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9702 "0362 Unsupported MQ count. (%d)\n",
9703 mq->entry_count);
9704		if (mq->entry_count < 16) {
9705			status = -EINVAL;
			goto out;	/* frees the mailbox */
		}
9706 /* otherwise default to smallest count (drop through) */
9707 case 16:
9708 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9709 LPFC_MQ_CNT_16);
9710 break;
9711 case 32:
9712 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9713 LPFC_MQ_CNT_32);
9714 break;
9715 case 64:
9716 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9717 LPFC_MQ_CNT_64);
9718 break;
9719 case 128:
9720 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9721 LPFC_MQ_CNT_128);
9722 break;
9723 }
9724 list_for_each_entry(dmabuf, &mq->page_list, list) {
9725 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9726 putPaddrLow(dmabuf->phys);
9727 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9728 putPaddrHigh(dmabuf->phys);
9729 }
9730 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9731 /* The IOCTL status is embedded in the mailbox subheader. */
9732 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
9733 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9734 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9735 if (shdr_status || shdr_add_status || rc) {
9736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9737 "2502 MQ_CREATE mailbox failed with "
9738 "status x%x add_status x%x, mbx status x%x\n",
9739 shdr_status, shdr_add_status, rc);
9740 status = -ENXIO;
9741 goto out;
9742 }
9743 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
9744 if (mq->queue_id == 0xFFFF) {
9745 status = -ENXIO;
9746 goto out;
9747 }
9748 mq->type = LPFC_MQ;
9749 mq->subtype = subtype;
9750 mq->host_index = 0;
9751 mq->hba_index = 0;
9752
9753 /* link the mq onto the parent cq child list */
9754 list_add_tail(&mq->list, &cq->child_list);
9755out:
9756	mempool_free(mbox, phba->mbox_mem_pool);
9757 return status;
9758}
9759
9760/**
9761 * lpfc_wq_create - Create a Work Queue on the HBA
9762 * @phba: HBA structure that indicates port to create a queue on.
9763 * @wq: The queue structure to use to create the work queue.
9764 * @cq: The completion queue to bind this work queue to.
9765 * @subtype: The subtype of the work queue indicating its functionality.
9766 *
9767 * This function creates a work queue, as detailed in @wq, on a port, described
9768 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
9769 *
9770 * The @phba struct is used to send a mailbox command to the HBA. The @wq
9771 * struct is used to get the entry count and entry size that are necessary to
9772 * determine the number of pages to allocate and use for this queue. The @cq
9773 * is used to indicate which completion queue to bind this work queue to. This
9774 * function will send the WQ_CREATE mailbox command to the HBA to set up the
9775 * work queue. This function is synchronous: it waits for the mailbox
9776 * command to finish before continuing.
9777 *
9778 * On success this function will return zero. If unable to allocate enough
9779 * memory this function will return -ENOMEM. If the queue create mailbox
9780 * command fails this function will return -ENXIO.
9781 **/
9782uint32_t
9783lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9784 struct lpfc_queue *cq, uint32_t subtype)
9785{
9786 struct lpfc_mbx_wq_create *wq_create;
9787 struct lpfc_dmabuf *dmabuf;
9788 LPFC_MBOXQ_t *mbox;
9789 int rc, length, status = 0;
9790 uint32_t shdr_status, shdr_add_status;
9791 union lpfc_sli4_cfg_shdr *shdr;
9792
9793 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9794 if (!mbox)
9795 return -ENOMEM;
9796 length = (sizeof(struct lpfc_mbx_wq_create) -
9797 sizeof(struct lpfc_sli4_cfg_mhdr));
9798 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9799 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
9800 length, LPFC_SLI4_MBX_EMBED);
9801 wq_create = &mbox->u.mqe.un.wq_create;
9802 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
9803 wq->page_count);
9804 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
9805 cq->queue_id);
9806 list_for_each_entry(dmabuf, &wq->page_list, list) {
9807 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9808 putPaddrLow(dmabuf->phys);
9809 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9810 putPaddrHigh(dmabuf->phys);
9811 }
9812 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9813 /* The IOCTL status is embedded in the mailbox subheader. */
9814 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
9815 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9816 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9817 if (shdr_status || shdr_add_status || rc) {
9818 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9819 "2503 WQ_CREATE mailbox failed with "
9820 "status x%x add_status x%x, mbx status x%x\n",
9821 shdr_status, shdr_add_status, rc);
9822 status = -ENXIO;
9823 goto out;
9824 }
9825 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
9826 if (wq->queue_id == 0xFFFF) {
9827 status = -ENXIO;
9828 goto out;
9829 }
9830 wq->type = LPFC_WQ;
9831 wq->subtype = subtype;
9832 wq->host_index = 0;
9833 wq->hba_index = 0;
9834
9835 /* link the wq onto the parent cq child list */
9836 list_add_tail(&wq->list, &cq->child_list);
9837out:
9838	mempool_free(mbox, phba->mbox_mem_pool);
9839 return status;
9840}
9841
9842/**
9843 * lpfc_rq_create - Create a Receive Queue on the HBA
9844 * @phba: HBA structure that indicates port to create a queue on.
9845 * @hrq: The queue structure to use to create the header receive queue.
9846 * @drq: The queue structure to use to create the data receive queue.
9847 * @cq: The completion queue to bind this receive queue pair to.
 * @subtype: The functional subtype of the queues.
9848 *
9849 * This function creates a receive buffer queue pair, as detailed in @hrq and
9850 * @drq, on a port, described by @phba by sending an RQ_CREATE mailbox command
9851 * to the HBA.
9852 *
9853 * The @phba struct is used to send a mailbox command to the HBA. The @hrq and
9854 * @drq structs are used to get the entry counts that are necessary to
9855 * determine the number of pages to use for each queue. The @cq indicates the
9856 * completion queue to which buffers posted to these queues are bound.
9857 * This function will send the RQ_CREATE mailbox command to the HBA to set up
9858 * the receive queue pair. This function is synchronous: it waits for the
9859 * mailbox command to finish before continuing.
9860 *
9861 * On success this function will return zero. If unable to allocate enough
9862 * memory this function will return -ENOMEM. If the queue create mailbox
9863 * command fails this function will return -ENXIO.
9864 **/
9865uint32_t
9866lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9867 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
9868{
9869 struct lpfc_mbx_rq_create *rq_create;
9870 struct lpfc_dmabuf *dmabuf;
9871 LPFC_MBOXQ_t *mbox;
9872 int rc, length, status = 0;
9873 uint32_t shdr_status, shdr_add_status;
9874 union lpfc_sli4_cfg_shdr *shdr;
9875
9876 if (hrq->entry_count != drq->entry_count)
9877 return -EINVAL;
9878 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9879 if (!mbox)
9880 return -ENOMEM;
9881 length = (sizeof(struct lpfc_mbx_rq_create) -
9882 sizeof(struct lpfc_sli4_cfg_mhdr));
9883 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9884 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9885 length, LPFC_SLI4_MBX_EMBED);
9886 rq_create = &mbox->u.mqe.un.rq_create;
9887 switch (hrq->entry_count) {
9888 default:
9889 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9890 "2535 Unsupported RQ count. (%d)\n",
9891 hrq->entry_count);
9892		if (hrq->entry_count < 512) {
9893			status = -EINVAL;
			goto out;	/* frees the mailbox */
		}
9894 /* otherwise default to smallest count (drop through) */
9895 case 512:
9896 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9897 LPFC_RQ_RING_SIZE_512);
9898 break;
9899 case 1024:
9900 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9901 LPFC_RQ_RING_SIZE_1024);
9902 break;
9903 case 2048:
9904 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9905 LPFC_RQ_RING_SIZE_2048);
9906 break;
9907 case 4096:
9908 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9909 LPFC_RQ_RING_SIZE_4096);
9910 break;
9911 }
9912 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9913 cq->queue_id);
9914 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9915 hrq->page_count);
9916 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9917 LPFC_HDR_BUF_SIZE);
9918 list_for_each_entry(dmabuf, &hrq->page_list, list) {
9919 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9920 putPaddrLow(dmabuf->phys);
9921 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9922 putPaddrHigh(dmabuf->phys);
9923 }
9924 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9925 /* The IOCTL status is embedded in the mailbox subheader. */
9926 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9927 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9928 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9929 if (shdr_status || shdr_add_status || rc) {
9930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9931 "2504 RQ_CREATE mailbox failed with "
9932 "status x%x add_status x%x, mbx status x%x\n",
9933 shdr_status, shdr_add_status, rc);
9934 status = -ENXIO;
9935 goto out;
9936 }
9937 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9938 if (hrq->queue_id == 0xFFFF) {
9939 status = -ENXIO;
9940 goto out;
9941 }
9942 hrq->type = LPFC_HRQ;
9943 hrq->subtype = subtype;
9944 hrq->host_index = 0;
9945 hrq->hba_index = 0;
9946
9947 /* now create the data queue */
9948 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9949 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9950 length, LPFC_SLI4_MBX_EMBED);
9951 switch (drq->entry_count) {
9952 default:
9953 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9954 "2536 Unsupported RQ count. (%d)\n",
9955 drq->entry_count);
9956		if (drq->entry_count < 512) {
9957			status = -EINVAL;
			goto out;	/* frees the mailbox */
		}
9958 /* otherwise default to smallest count (drop through) */
9959 case 512:
9960 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9961 LPFC_RQ_RING_SIZE_512);
9962 break;
9963 case 1024:
9964 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9965 LPFC_RQ_RING_SIZE_1024);
9966 break;
9967 case 2048:
9968 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9969 LPFC_RQ_RING_SIZE_2048);
9970 break;
9971 case 4096:
9972 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9973 LPFC_RQ_RING_SIZE_4096);
9974 break;
9975 }
9976 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9977 cq->queue_id);
9978 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9979 drq->page_count);
9980 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9981 LPFC_DATA_BUF_SIZE);
9982 list_for_each_entry(dmabuf, &drq->page_list, list) {
9983 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9984 putPaddrLow(dmabuf->phys);
9985 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9986 putPaddrHigh(dmabuf->phys);
9987 }
9988 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9989 /* The IOCTL status is embedded in the mailbox subheader. */
9990 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9991 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9992 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9993 if (shdr_status || shdr_add_status || rc) {
9994 status = -ENXIO;
9995 goto out;
9996 }
9997 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9998 if (drq->queue_id == 0xFFFF) {
9999 status = -ENXIO;
10000 goto out;
10001 }
10002 drq->type = LPFC_DRQ;
10003 drq->subtype = subtype;
10004 drq->host_index = 0;
10005 drq->hba_index = 0;
10006
10007 /* link the header and data RQs onto the parent cq child list */
10008 list_add_tail(&hrq->list, &cq->child_list);
10009 list_add_tail(&drq->list, &cq->child_list);
10010
10011out:
10012	mempool_free(mbox, phba->mbox_mem_pool);
10013 return status;
10014}
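
Because lpfc_rq_create() programs both queues through a single mailbox, the
header and data RQs travel as a pair with identical entry counts. A hedged
usage sketch, assuming the LPFC_USOL subtype constant from lpfc_sli4.h:

/* Sketch: hrq/drq must be sized identically before the create. */
static int lpfc_rq_pair_sketch(struct lpfc_hba *phba,
			       struct lpfc_queue *hrq,
			       struct lpfc_queue *drq,
			       struct lpfc_queue *cq)
{
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;		/* lpfc_rq_create() checks too */
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}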
10015
10016/**
10017 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
10018 * @eq: The queue structure associated with the queue to destroy.
10019 *
10020 * This function destroys a queue, as detailed in @eq, by sending a mailbox
10021 * command, specific to the type of queue, to the HBA.
10022 *
10023 * The @eq struct is used to get the queue ID of the queue to destroy.
10024 *
10025 * On success this function will return zero. If the queue destroy mailbox
10026 * command fails this function will return -ENXIO.
10027 **/
10028uint32_t
10029lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
10030{
10031 LPFC_MBOXQ_t *mbox;
10032 int rc, length, status = 0;
10033 uint32_t shdr_status, shdr_add_status;
10034 union lpfc_sli4_cfg_shdr *shdr;
10035
10036 if (!eq)
10037 return -ENODEV;
10038 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
10039 if (!mbox)
10040 return -ENOMEM;
10041 length = (sizeof(struct lpfc_mbx_eq_destroy) -
10042 sizeof(struct lpfc_sli4_cfg_mhdr));
10043 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10044 LPFC_MBOX_OPCODE_EQ_DESTROY,
10045 length, LPFC_SLI4_MBX_EMBED);
10046 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
10047 eq->queue_id);
10048 mbox->vport = eq->phba->pport;
10049 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10050
10051 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
10052 /* The IOCTL status is embedded in the mailbox subheader. */
10053 shdr = (union lpfc_sli4_cfg_shdr *)
10054 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
10055 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10056 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10057 if (shdr_status || shdr_add_status || rc) {
10058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10059 "2505 EQ_DESTROY mailbox failed with "
10060 "status x%x add_status x%x, mbx status x%x\n",
10061 shdr_status, shdr_add_status, rc);
10062 status = -ENXIO;
10063 }
10064
10065 /* Remove eq from any list */
10066 list_del_init(&eq->list);
10067	mempool_free(mbox, eq->phba->mbox_mem_pool);
10068 return status;
10069}
10070
10071/**
10072 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
10073 * @cq: The queue structure associated with the queue to destroy.
10074 *
10075 * This function destroys a queue, as detailed in @cq, by sending a mailbox
10076 * command, specific to the type of queue, to the HBA.
10077 *
10078 * The @cq struct is used to get the queue ID of the queue to destroy.
10079 *
10080 * On success this function will return zero. If the queue destroy mailbox
10081 * command fails this function will return -ENXIO.
10082 **/
10083uint32_t
10084lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
10085{
10086 LPFC_MBOXQ_t *mbox;
10087 int rc, length, status = 0;
10088 uint32_t shdr_status, shdr_add_status;
10089 union lpfc_sli4_cfg_shdr *shdr;
10090
10091 if (!cq)
10092 return -ENODEV;
10093 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
10094 if (!mbox)
10095 return -ENOMEM;
10096 length = (sizeof(struct lpfc_mbx_cq_destroy) -
10097 sizeof(struct lpfc_sli4_cfg_mhdr));
10098 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10099 LPFC_MBOX_OPCODE_CQ_DESTROY,
10100 length, LPFC_SLI4_MBX_EMBED);
10101 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
10102 cq->queue_id);
10103 mbox->vport = cq->phba->pport;
10104 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10105 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
10106 /* The IOCTL status is embedded in the mailbox subheader. */
10107 shdr = (union lpfc_sli4_cfg_shdr *)
10108		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
10109 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10110 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10111 if (shdr_status || shdr_add_status || rc) {
10112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10113 "2506 CQ_DESTROY mailbox failed with "
10114 "status x%x add_status x%x, mbx status x%x\n",
10115 shdr_status, shdr_add_status, rc);
10116 status = -ENXIO;
10117 }
10118 /* Remove cq from any list */
10119 list_del_init(&cq->list);
10120	mempool_free(mbox, cq->phba->mbox_mem_pool);
10121 return status;
10122}
10123
10124/**
10125 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
10126 * @mq: The queue structure associated with the queue to destroy.
10127 *
10128 * This function destroys a queue, as detailed in @mq, by sending a mailbox
10129 * command, specific to the type of queue, to the HBA.
10130 *
10131 * The @mq struct is used to get the queue ID of the queue to destroy.
10132 *
10133 * On success this function will return zero. If the queue destroy mailbox
10134 * command fails this function will return -ENXIO.
10135 **/
10136uint32_t
10137lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
10138{
10139 LPFC_MBOXQ_t *mbox;
10140 int rc, length, status = 0;
10141 uint32_t shdr_status, shdr_add_status;
10142 union lpfc_sli4_cfg_shdr *shdr;
10143
10144 if (!mq)
10145 return -ENODEV;
10146 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
10147 if (!mbox)
10148 return -ENOMEM;
10149 length = (sizeof(struct lpfc_mbx_mq_destroy) -
10150 sizeof(struct lpfc_sli4_cfg_mhdr));
10151 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10152 LPFC_MBOX_OPCODE_MQ_DESTROY,
10153 length, LPFC_SLI4_MBX_EMBED);
10154 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
10155 mq->queue_id);
10156 mbox->vport = mq->phba->pport;
10157 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10158 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
10159 /* The IOCTL status is embedded in the mailbox subheader. */
10160 shdr = (union lpfc_sli4_cfg_shdr *)
10161 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
10162 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10163 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10164 if (shdr_status || shdr_add_status || rc) {
10165 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10166 "2507 MQ_DESTROY mailbox failed with "
10167 "status x%x add_status x%x, mbx status x%x\n",
10168 shdr_status, shdr_add_status, rc);
10169 status = -ENXIO;
10170 }
10171 /* Remove mq from any list */
10172 list_del_init(&mq->list);
10173	mempool_free(mbox, mq->phba->mbox_mem_pool);
10174 return status;
10175}
10176
10177/**
10178 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
10179 * @wq: The queue structure associated with the queue to destroy.
10180 *
10181 * This function destroys a queue, as detailed in @wq, by sending a mailbox
10182 * command, specific to the type of queue, to the HBA.
10183 *
10184 * The @wq struct is used to get the queue ID of the queue to destroy.
10185 *
10186 * On success this function will return zero. If the queue destroy mailbox
10187 * command fails this function will return -ENXIO.
10188 **/
10189uint32_t
10190lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10191{
10192 LPFC_MBOXQ_t *mbox;
10193 int rc, length, status = 0;
10194 uint32_t shdr_status, shdr_add_status;
10195 union lpfc_sli4_cfg_shdr *shdr;
10196
10197 if (!wq)
10198 return -ENODEV;
10199 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
10200 if (!mbox)
10201 return -ENOMEM;
10202 length = (sizeof(struct lpfc_mbx_wq_destroy) -
10203 sizeof(struct lpfc_sli4_cfg_mhdr));
10204 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10205 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
10206 length, LPFC_SLI4_MBX_EMBED);
10207 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
10208 wq->queue_id);
10209 mbox->vport = wq->phba->pport;
10210 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10211 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
10212 shdr = (union lpfc_sli4_cfg_shdr *)
10213 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
10214 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10215 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10216 if (shdr_status || shdr_add_status || rc) {
10217 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10218 "2508 WQ_DESTROY mailbox failed with "
10219 "status x%x add_status x%x, mbx status x%x\n",
10220 shdr_status, shdr_add_status, rc);
10221 status = -ENXIO;
10222 }
10223 /* Remove wq from any list */
10224 list_del_init(&wq->list);
10225	mempool_free(mbox, wq->phba->mbox_mem_pool);
10226 return status;
10227}
10228
10229/**
10230 * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The header receive queue structure of the pair to destroy.
 * @drq: The data receive queue structure of the pair to destroy.
10232 *
10233 * This function destroys the receive queue pair, as detailed in @hrq and
10234 * @drq, by sending a mailbox command, specific to the type of queue, to the
10235 * HBA.
10236 *
10237 * The @hrq and @drq structs are used to get the queue IDs of the queues to
10238 * destroy.
10239 * On success this function will return zero. If the queue destroy mailbox
10240 * command fails this function will return -ENXIO.
10240 **/
10241uint32_t
10242lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10243 struct lpfc_queue *drq)
10244{
10245 LPFC_MBOXQ_t *mbox;
10246 int rc, length, status = 0;
10247 uint32_t shdr_status, shdr_add_status;
10248 union lpfc_sli4_cfg_shdr *shdr;
10249
10250 if (!hrq || !drq)
10251 return -ENODEV;
10252 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
10253 if (!mbox)
10254 return -ENOMEM;
10255 length = (sizeof(struct lpfc_mbx_rq_destroy) -
10256 sizeof(struct mbox_header));
10257 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10258 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
10259 length, LPFC_SLI4_MBX_EMBED);
10260 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10261 hrq->queue_id);
10262 mbox->vport = hrq->phba->pport;
10263 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10264 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
10265 /* The IOCTL status is embedded in the mailbox subheader. */
10266 shdr = (union lpfc_sli4_cfg_shdr *)
10267 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10268 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10269 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10270 if (shdr_status || shdr_add_status || rc) {
10271 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10272 "2509 RQ_DESTROY mailbox failed with "
10273 "status x%x add_status x%x, mbx status x%x\n",
10274 shdr_status, shdr_add_status, rc);
10275 if (rc != MBX_TIMEOUT)
10276 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10277 return -ENXIO;
10278 }
10279 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10280 drq->queue_id);
10281 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
10282 shdr = (union lpfc_sli4_cfg_shdr *)
10283 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10284 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10285 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10286 if (shdr_status || shdr_add_status || rc) {
10287 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10288 "2510 RQ_DESTROY mailbox failed with "
10289 "status x%x add_status x%x, mbx status x%x\n",
10290 shdr_status, shdr_add_status, rc);
10291 status = -ENXIO;
10292 }
10293 list_del_init(&hrq->list);
10294 list_del_init(&drq->list);
10295	mempool_free(mbox, hrq->phba->mbox_mem_pool);
10296 return status;
10297}
10298
10299/**
10300 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
10301 * @phba: pointer to lpfc hba data structure.
10302 * @pdma_phys_addr0: Physical address of the 1st SGL page.
10303 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
10304 * @xritag: the xritag that ties this io to the SGL pages.
10305 *
10306 * This routine will post the sgl pages for the IO that has the xritag
10307 * that is in the iocbq structure. The xritag is assigned during iocbq
10308 * creation and persists for as long as the driver is loaded.
10309 * If the caller has fewer than 256 scatter gather segments to map,
10310 * pdma_phys_addr1 should be 0.
10311 * If the caller needs to map more than 256 scatter gather segments,
10312 * pdma_phys_addr1 should be a valid physical address.
10313 * Physical addresses for SGLs must be 64-byte aligned.
10314 * If two SGL pages are mapped, the first must have exactly 256 entries;
10315 * the second can have between 1 and 256 entries.
10316 *
10317 * Return codes:
10318 * 0 - Success
10319 * -ENXIO, -ENOMEM - Failure
10320 **/
10321int
10322lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10323 dma_addr_t pdma_phys_addr0,
10324 dma_addr_t pdma_phys_addr1,
10325 uint16_t xritag)
10326{
10327 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
10328 LPFC_MBOXQ_t *mbox;
10329 int rc;
10330 uint32_t shdr_status, shdr_add_status;
10331 union lpfc_sli4_cfg_shdr *shdr;
10332
10333 if (xritag == NO_XRI) {
10334 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10335 "0364 Invalid param:\n");
10336 return -EINVAL;
10337 }
10338
10339 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10340 if (!mbox)
10341 return -ENOMEM;
10342
10343 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10344 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
10345 sizeof(struct lpfc_mbx_post_sgl_pages) -
10346 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
10347
10348 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
10349 &mbox->u.mqe.un.post_sgl_pages;
10350 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
10351 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
10352
10353 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
10354 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
10355 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
10356 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
10357
10358 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
10359 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
10360 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
10361 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
10362 if (!phba->sli4_hba.intr_enable)
10363 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10364 else
10365 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10366 /* The IOCTL status is embedded in the mailbox subheader. */
10367 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
10368 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10369 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10370 if (rc != MBX_TIMEOUT)
10371 mempool_free(mbox, phba->mbox_mem_pool);
10372 if (shdr_status || shdr_add_status || rc) {
10373 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10374 "2511 POST_SGL mailbox failed with "
10375 "status x%x add_status x%x, mbx status x%x\n",
10376 shdr_status, shdr_add_status, rc);
10377 rc = -ENXIO;
10378 }
10379	return rc;
10380}
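
A hedged usage sketch for the 256-entry rule described above: with 256 or
fewer scatter/gather entries only page 0 is posted and the second physical
address stays 0. The sglq fields used here (phys, sli4_xritag) are the
same ones referenced later in this file.

/* Sketch: single-page SGL post for one XRI. */
static int lpfc_post_one_sgl_sketch(struct lpfc_hba *phba,
				    struct lpfc_sglq *sglq)
{
	/* <= 256 entries: second SGL page unused, so pass 0 */
	return lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
}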
10381/**
10382 * lpfc_sli4_remove_all_sgl_pages - Remove all SGL pages registered with the HBA
10383 * @phba: pointer to lpfc hba data structure.
10384 *
10385 * This routine will remove all of the sgl pages registered with the hba.
10386 *
10387 * Return codes:
10388 * 0 - Success
10389 * -ENXIO, -ENOMEM - Failure
10390 **/
10391int
10392lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10393{
10394 LPFC_MBOXQ_t *mbox;
10395 int rc;
10396 uint32_t shdr_status, shdr_add_status;
10397 union lpfc_sli4_cfg_shdr *shdr;
10398
10399 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10400 if (!mbox)
10401 return -ENOMEM;
10402
10403 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10404 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10405 LPFC_SLI4_MBX_EMBED);
10406 if (!phba->sli4_hba.intr_enable)
10407 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10408 else
10409 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10410 /* The IOCTL status is embedded in the mailbox subheader. */
10411 shdr = (union lpfc_sli4_cfg_shdr *)
10412 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10413 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10414 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10415 if (rc != MBX_TIMEOUT)
10416 mempool_free(mbox, phba->mbox_mem_pool);
10417 if (shdr_status || shdr_add_status || rc) {
10418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10419 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10420 "status x%x add_status x%x, mbx status x%x\n",
10421 shdr_status, shdr_add_status, rc);
10422 rc = -ENXIO;
10423 }
10424 return rc;
10425}
10426
10427/**
10428 * lpfc_sli4_next_xritag - Get an xritag for the io
10429 * @phba: Pointer to HBA context object.
10430 *
10431 * This function gets an xritag for the iocb. If no unused xritag is
10432 * available, it will return NO_XRI (0xFFFF, i.e. (uint16_t)-1), which is
10433 * not a valid xritag.
10434 * The function returns the allocated xritag if successful.
10435 * The caller is not required to hold any lock.
10436 **/
10437uint16_t
10438lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10439{
10440 uint16_t xritag;
10441
10442 spin_lock_irq(&phba->hbalock);
10443 xritag = phba->sli4_hba.next_xri;
10444 if ((xritag != (uint16_t) -1) && xritag <
10445 (phba->sli4_hba.max_cfg_param.max_xri
10446 + phba->sli4_hba.max_cfg_param.xri_base)) {
10447 phba->sli4_hba.next_xri++;
10448 phba->sli4_hba.max_cfg_param.xri_used++;
10449 spin_unlock_irq(&phba->hbalock);
10450 return xritag;
10451 }
10452 spin_unlock_irq(&phba->hbalock);
10453	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10454			"2004 Failed to allocate XRI. Last XRITAG is %d"
10455 " Max XRI is %d, Used XRI is %d\n",
10456 phba->sli4_hba.next_xri,
10457 phba->sli4_hba.max_cfg_param.max_xri,
10458 phba->sli4_hba.max_cfg_param.xri_used);
10459	return NO_XRI;	/* (uint16_t)-1 == 0xFFFF */
10460}
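
Because the function returns uint16_t, the -1 failure value reaches
callers as 0xFFFF, which is exactly the NO_XRI sentinel tested by
lpfc_sli4_post_sgl() above. A hedged caller sketch:

/* Sketch: reserve an XRI, mapping exhaustion to -ENOMEM. */
static int lpfc_reserve_xri_sketch(struct lpfc_hba *phba, uint16_t *xri)
{
	uint16_t tag = lpfc_sli4_next_xritag(phba);

	if (tag == NO_XRI)
		return -ENOMEM;
	*xri = tag;
	return 0;
}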
10461
10462/**
10463 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
10464 * @phba: pointer to lpfc hba data structure.
10465 *
10466 * This routine is invoked to post a block of driver's sgl pages to the
10467 * HBA using non-embedded mailbox command. No Lock is held. This routine
10468 * is only called when the driver is loading and after all IO has been
10469 * stopped.
10470 **/
10471int
10472lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10473{
10474 struct lpfc_sglq *sglq_entry;
10475 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10476 struct sgl_page_pairs *sgl_pg_pairs;
10477 void *viraddr;
10478 LPFC_MBOXQ_t *mbox;
10479 uint32_t reqlen, alloclen, pg_pairs;
10480 uint32_t mbox_tmo;
10481 uint16_t xritag_start = 0;
10482 int els_xri_cnt, rc = 0;
10483 uint32_t shdr_status, shdr_add_status;
10484 union lpfc_sli4_cfg_shdr *shdr;
10485
10486 /* The number of sgls to be posted */
10487 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
10488
10489 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
10490 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10491 if (reqlen > PAGE_SIZE) {
10492 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10493 "2559 Block sgl registration required DMA "
10494			"size (%d) greater than a page\n", reqlen);
10495 return -ENOMEM;
10496 }
10497 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10498 if (!mbox) {
10499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10500 "2560 Failed to allocate mbox cmd memory\n");
10501 return -ENOMEM;
10502 }
10503
10504 /* Allocate DMA memory and set up the non-embedded mailbox command */
10505 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10506 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10507 LPFC_SLI4_MBX_NEMBED);
10508
10509 if (alloclen < reqlen) {
10510 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10511 "0285 Allocated DMA memory size (%d) is "
10512 "less than the requested DMA memory "
10513 "size (%d)\n", alloclen, reqlen);
10514 lpfc_sli4_mbox_cmd_free(phba, mbox);
10515 return -ENOMEM;
10516 }
10517	/* Get the first SGE entry from the non-embedded DMA memory */
10518 viraddr = mbox->sge_array->addr[0];
10519
10520 /* Set up the SGL pages in the non-embedded DMA pages */
10521 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10522 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10523
10524 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
10525 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
10526 /* Set up the sge entry */
10527 sgl_pg_pairs->sgl_pg0_addr_lo =
10528 cpu_to_le32(putPaddrLow(sglq_entry->phys));
10529 sgl_pg_pairs->sgl_pg0_addr_hi =
10530 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
10531 sgl_pg_pairs->sgl_pg1_addr_lo =
10532 cpu_to_le32(putPaddrLow(0));
10533 sgl_pg_pairs->sgl_pg1_addr_hi =
10534 cpu_to_le32(putPaddrHigh(0));
10535 /* Keep the first xritag on the list */
10536 if (pg_pairs == 0)
10537 xritag_start = sglq_entry->sli4_xritag;
10538 sgl_pg_pairs++;
10539 }
10540 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10541	bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
10542 /* Perform endian conversion if necessary */
10543 sgl->word0 = cpu_to_le32(sgl->word0);
10544
10545 if (!phba->sli4_hba.intr_enable)
10546 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10547 else {
10548 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10549 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10550 }
10551 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10552 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10553 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10554 if (rc != MBX_TIMEOUT)
10555 lpfc_sli4_mbox_cmd_free(phba, mbox);
10556 if (shdr_status || shdr_add_status || rc) {
10557 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10558 "2513 POST_SGL_BLOCK mailbox command failed "
10559 "status x%x add_status x%x mbx status x%x\n",
10560 shdr_status, shdr_add_status, rc);
10561 rc = -ENXIO;
10562 }
10563 return rc;
10564}
10565
10566/**
10567 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
10568 * @phba: pointer to lpfc hba data structure.
10569 * @sblist: pointer to scsi buffer list.
10570 * @count: number of scsi buffers on the list.
10571 *
10572 * This routine is invoked to post a block of @count scsi sgl pages from a
10573 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
10574 * No Lock is held.
10575 *
10576 **/
10577int
10578lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10579 int cnt)
10580{
10581 struct lpfc_scsi_buf *psb;
10582 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10583 struct sgl_page_pairs *sgl_pg_pairs;
10584 void *viraddr;
10585 LPFC_MBOXQ_t *mbox;
10586 uint32_t reqlen, alloclen, pg_pairs;
10587 uint32_t mbox_tmo;
10588 uint16_t xritag_start = 0;
10589 int rc = 0;
10590 uint32_t shdr_status, shdr_add_status;
10591 dma_addr_t pdma_phys_bpl1;
10592 union lpfc_sli4_cfg_shdr *shdr;
10593
10594 /* Calculate the requested length of the dma memory */
10595 reqlen = cnt * sizeof(struct sgl_page_pairs) +
10596 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10597 if (reqlen > PAGE_SIZE) {
10598 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10599 "0217 Block sgl registration required DMA "
10600			"size (%d) greater than a page\n", reqlen);
10601 return -ENOMEM;
10602 }
10603 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10604 if (!mbox) {
10605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10606 "0283 Failed to allocate mbox cmd memory\n");
10607 return -ENOMEM;
10608 }
10609
10610 /* Allocate DMA memory and set up the non-embedded mailbox command */
10611 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10612 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10613 LPFC_SLI4_MBX_NEMBED);
10614
10615 if (alloclen < reqlen) {
10616 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10617 "2561 Allocated DMA memory size (%d) is "
10618 "less than the requested DMA memory "
10619 "size (%d)\n", alloclen, reqlen);
10620 lpfc_sli4_mbox_cmd_free(phba, mbox);
10621 return -ENOMEM;
10622 }
10623	/* Get the first SGE entry from the non-embedded DMA memory */
10624 viraddr = mbox->sge_array->addr[0];
10625
10626 /* Set up the SGL pages in the non-embedded DMA pages */
10627 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10628 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10629
10630 pg_pairs = 0;
10631 list_for_each_entry(psb, sblist, list) {
10632 /* Set up the sge entry */
10633 sgl_pg_pairs->sgl_pg0_addr_lo =
10634 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
10635 sgl_pg_pairs->sgl_pg0_addr_hi =
10636 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
10637 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
10638 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
10639 else
10640 pdma_phys_bpl1 = 0;
10641 sgl_pg_pairs->sgl_pg1_addr_lo =
10642 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
10643 sgl_pg_pairs->sgl_pg1_addr_hi =
10644 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
10645 /* Keep the first xritag on the list */
10646 if (pg_pairs == 0)
10647 xritag_start = psb->cur_iocbq.sli4_xritag;
10648 sgl_pg_pairs++;
10649 pg_pairs++;
10650 }
10651 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10652 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10653 /* Perform endian conversion if necessary */
10654 sgl->word0 = cpu_to_le32(sgl->word0);
10655
10656 if (!phba->sli4_hba.intr_enable)
10657 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10658 else {
10659 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10660 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10661 }
10662 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10663 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10664 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10665 if (rc != MBX_TIMEOUT)
10666 lpfc_sli4_mbox_cmd_free(phba, mbox);
10667 if (shdr_status || shdr_add_status || rc) {
10668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10669 "2564 POST_SGL_BLOCK mailbox command failed "
10670 "status x%x add_status x%x mbx status x%x\n",
10671 shdr_status, shdr_add_status, rc);
10672 rc = -ENXIO;
10673 }
10674 return rc;
10675}
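
To put a number on the reqlen check used by both posting routines above:
each sgl_page_pairs entry is four 32-bit words (16 bytes), so a 4 KiB page
holds roughly (4096 - sizeof(cfg_shdr) - 4) / 16, on the order of 250 page
pairs per non-embedded command; larger batches must be split by the
caller. (Structure sizes hedged from the lpfc_hw4.h layouts.)

/* Worked bound (sizes hedged): 16-byte page pairs in a 4 KiB page. */
uint32_t max_pg_pairs_example =
	(4096 - sizeof(union lpfc_sli4_cfg_shdr) - sizeof(uint32_t)) / 16;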
10676
10677/**
10678 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
10679 * @phba: pointer to lpfc_hba struct that the frame was received on
10680 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10681 *
10682 * This function checks the fields in the @fc_hdr to see if the FC frame is a
10683 * valid type of frame that the LPFC driver will handle. This function will
10684 * return zero if the frame is a valid frame or a non-zero value when the
10685 * frame does not pass the check.
10686 **/
10687static int
10688lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
10689{
10690 char *rctl_names[] = FC_RCTL_NAMES_INIT;
10691 char *type_names[] = FC_TYPE_NAMES_INIT;
10692 struct fc_vft_header *fc_vft_hdr;
10693
10694 switch (fc_hdr->fh_r_ctl) {
10695 case FC_RCTL_DD_UNCAT: /* uncategorized information */
10696 case FC_RCTL_DD_SOL_DATA: /* solicited data */
10697 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
10698 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
10699 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
10700 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
10701 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
10702 case FC_RCTL_DD_CMD_STATUS: /* command status */
10703 case FC_RCTL_ELS_REQ: /* extended link services request */
10704 case FC_RCTL_ELS_REP: /* extended link services reply */
10705 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
10706 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
10707 case FC_RCTL_BA_NOP: /* basic link service NOP */
10708 case FC_RCTL_BA_ABTS: /* basic link service abort */
10709 case FC_RCTL_BA_RMC: /* remove connection */
10710 case FC_RCTL_BA_ACC: /* basic accept */
10711 case FC_RCTL_BA_RJT: /* basic reject */
10712 case FC_RCTL_BA_PRMT:
10713 case FC_RCTL_ACK_1: /* acknowledge_1 */
10714 case FC_RCTL_ACK_0: /* acknowledge_0 */
10715 case FC_RCTL_P_RJT: /* port reject */
10716 case FC_RCTL_F_RJT: /* fabric reject */
10717 case FC_RCTL_P_BSY: /* port busy */
10718 case FC_RCTL_F_BSY: /* fabric busy to data frame */
10719 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
10720 case FC_RCTL_LCR: /* link credit reset */
10721 case FC_RCTL_END: /* end */
10722 break;
10723 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
10724 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10725 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
10726 return lpfc_fc_frame_check(phba, fc_hdr);
10727 default:
10728 goto drop;
10729 }
10730 switch (fc_hdr->fh_type) {
10731 case FC_TYPE_BLS:
10732 case FC_TYPE_ELS:
10733 case FC_TYPE_FCP:
10734 case FC_TYPE_CT:
10735 break;
10736 case FC_TYPE_IP:
10737 case FC_TYPE_ILS:
10738 default:
10739 goto drop;
10740 }
10741 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10742 "2538 Received frame rctl:%s type:%s\n",
10743 rctl_names[fc_hdr->fh_r_ctl],
10744 type_names[fc_hdr->fh_type]);
10745 return 0;
10746drop:
10747 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
10748 "2539 Dropped frame rctl:%s type:%s\n",
10749 rctl_names[fc_hdr->fh_r_ctl],
10750 type_names[fc_hdr->fh_type]);
10751 return 1;
10752}
10753
10754/**
10755 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
10756 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10757 *
10758 * This function processes the FC header to retrieve the VFI from the VF
10759 * header, if one exists. This function will return the VFI if one exists
10760 * or 0 if no Virtual Fabric Tagging Header exists.
10761 **/
10762static uint32_t
10763lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
10764{
10765 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10766
10767 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
10768 return 0;
10769 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
10770}
10771
10772/**
10773 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
10774 * @phba: Pointer to the HBA structure to search for the vport on
10775 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10776 * @fcfi: The FC Fabric ID that the frame came from
10777 *
10778 * This function searches the @phba for a vport that matches the content of the
10779 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
10780 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
10781 * returns the matching vport pointer or NULL if unable to match frame to a
10782 * vport.
10783 **/
10784static struct lpfc_vport *
10785lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10786 uint16_t fcfi)
10787{
10788 struct lpfc_vport **vports;
10789 struct lpfc_vport *vport = NULL;
10790 int i;
10791 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
10792 fc_hdr->fh_d_id[1] << 8 |
10793 fc_hdr->fh_d_id[2]);
10794
10795 vports = lpfc_create_vport_work_array(phba);
10796 if (vports != NULL)
10797 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
10798 if (phba->fcf.fcfi == fcfi &&
10799 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
10800 vports[i]->fc_myDID == did) {
10801 vport = vports[i];
10802 break;
10803 }
10804 }
10805 lpfc_destroy_vport_work_array(phba, vports);
10806 return vport;
10807}
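
A worked example of the D_ID assembly above: fh_d_id carries the 24-bit
destination address most-significant byte first, so bytes {0xAB, 0xCD,
0xEF} yield did == 0xABCDEF. A minimal sketch using the fc_frame_header
layout from scsi/fc/fc_fs.h:

/* Sketch: extract the 24-bit D_ID from a big-endian FC header. */
static uint32_t lpfc_fc_did_sketch(const struct fc_frame_header *fc_hdr)
{
	return (fc_hdr->fh_d_id[0] << 16) |
	       (fc_hdr->fh_d_id[1] << 8) |
		fc_hdr->fh_d_id[2];
}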
10808
10809/**
10810 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
10811 * @vport: The vport to work on.
10812 *
10813 * This function updates the receive sequence time stamp for this vport. The
10814 * receive sequence time stamp indicates the time that the last frame of the
10815 * sequence that has been idle for the longest amount of time was received.
10816 * The driver uses this time stamp to determine if any received sequences have
10817 * timed out.
10818 **/
10819void
10820lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
10821{
10822 struct lpfc_dmabuf *h_buf;
10823 struct hbq_dmabuf *dmabuf = NULL;
10824
10825 /* get the oldest sequence on the rcv list */
10826 h_buf = list_get_first(&vport->rcv_buffer_list,
10827 struct lpfc_dmabuf, list);
10828 if (!h_buf)
10829 return;
10830 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10831 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
10832}
10833
/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}

/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence, this routine will
 * go through the received sequences one at a time, from most inactive to most
 * active, to determine which ones need to be cleaned up. Once it has
 * determined that a sequence needs to be cleaned up it will simply free up
 * the resources without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

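	/* A sequence is considered timed out once E_D_TOV milliseconds have
	 * elapsed since its newest frame arrived.
	 */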
	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}

/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport on which the frame was received.
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
	    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than that of the
		 * frame on the list then insert the frame right after it.
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
		    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			return seq_dmabuf;
		}
	}
	return NULL;
}

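/*
 * Note: frames are matched to a pending sequence by the (SEQ_ID, OX_ID,
 * S_ID) tuple, and SEQ_CNT orders the frames within it. For example, frames
 * arriving with SEQ_CNT 0, 2, 1 end up linked on the dbuf list in the order
 * 0, 1, 2, which lets lpfc_seq_complete() detect holes with a linear walk.
 */
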
/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it frees up
 * all the frames from the partially assembled sequence.
 *
 * Return
 * true -- if a matching partially assembled sequence is present and all
 * the frames are freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 * nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}

/**
 * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort accept iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	if (cmd_iocbq)
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
}

/**
 * lpfc_sli4_seq_abort_acc - Accept sequence abort
 * @phba: Pointer to HBA context object.
 * @fc_hdr: pointer to a FC frame header.
 *
 * This function sends a basic accept (BA_ACC) in response to a previously
 * received unsolicited sequence abort event, after the sequence handling
 * has been aborted.
 **/
static void
lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
			struct fc_frame_header *fc_hdr)
{
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid;
	uint32_t sid, fctl;
	IOCB_t *icmd;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				"1268 Find ndlp returned NULL for oxid:x%x "
				"SID:x%x\n", oxid, sid);
		return;
	}

	/* Allocate buffer for acc iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = ndlp->nlp_rpi;

	ctiocb->iocb_cmpl = NULL;
	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
		bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
		ctiocb->sli4_xritag = oxid;
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG and RX_ID fields.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
		bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
		ctiocb->sli4_xritag = NO_XRI;
	}
	bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);

	/* Xmit CT abts accept on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
			CMD_XMIT_BLS_RSP64_CX, phba->link_state);
	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
}

/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it marks that the unsolicited sequence has
 * been aborted. After that, it will issue a basic accept to accept the
 * abort.
 **/
void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool abts_par;

	/* Make a copy of fc_hdr before the dmabuf is released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/*
		 * ABTS sent by responder to exchange, just free the buffer
		 */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
	} else {
		/*
		 * ABTS sent by initiator to exchange, need to do cleanup
		 */
		/* Try to abort partially assembled seq */
		abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);

		/* Send abort to ULP if partial seq abort failed */
		if (!abts_par)
			lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
		else
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
	}
	/* Send basic accept (BA_ACC) to the abort requester */
	lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
}

/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three things: 1) That the first frame has
 * a sequence count of zero. 2) That there is a frame with the last frame of
 * sequence bit set. 3) That there are no holes in the sequence count. The
 * function returns 1 when the sequence is complete, otherwise it returns 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}

/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that were not
 * able to be described and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->vpi + vport->phba->vpi_base;
		/* put the first buffer into the first IOCBq */
		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
			LPFC_DATA_BUF_SIZE;
		first_iocbq->iocb.un.rcvels.remoteID = sid;
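		/* acc_len accumulates the total byte count of the sequence
		 * from each frame's receive queue completion entry (RCQE).
		 */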
		first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
			bf_get(lpfc_rcqe_length,
			       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			pbde = (struct ulp_bde64 *)
				&iocbq->iocb.unsli3.sli3Words[4];
			pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
				bf_get(lpfc_rcqe_length,
				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
						IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
						IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
				bf_get(lpfc_rcqe_length,
				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}

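/**
 * lpfc_sli4_send_seq_to_ulp - Hand a fully assembled sequence to the ULP
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to the first frame of the assembled sequence
 *
 * This function builds an iocbq list for the sequence, hands it to the
 * unsolicited iocb handler for the ELS ring, and then releases the iocbqs
 * created by lpfc_prep_seq.
 **/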
static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      &phba->sli.ring[LPFC_ELS_RING],
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}

/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the received buffer to process.
 *
 * This function is called with no lock held. This function processes each
 * received buffer and gives it to the upper layers when a received buffer
 * indicates that it is the final frame in the sequence. The interrupt
 * service routine processes received buffers at interrupt contexts and adds
 * received dma buffers to the rb_pend_list queue and signals the worker
 * thread. The worker thread calls lpfc_sli4_handle_received_buffer, which
 * will call the appropriate receive function when the final frame in a
 * sequence is received.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* check to see if this is a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
	if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}

/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE / 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery, when driver execution is
 * sequential.
 *
 * Return codes
 * 	0 - successful
 * 	-EIO - The mailbox failed to complete successfully.
 * 	When this error occurs, the driver is not guaranteed
 * 	to have any rpi regions posted to the device and
 * 	must either attempt to repost the regions or take a
 * 	fatal error.
 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;

	/* Post all rpi memory regions to the port. */
	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

	return rc;
}

/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the
 * HBA consistent with the SLI-4 interface spec. This memory region
 * maps up to 64 rpi context regions.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 * 	-ENXIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t mbox_tmo;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
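	/* Completion status for an SLI4 config command is reported in the
	 * command's cfg subheader, in addition to the mailbox return code.
	 */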
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate an available rpi from the driver's
 * rpi bitmask consistent with the SLI-4 interface spec. If the pool of
 * unused rpis is running low, it also posts another page of rpi header
 * templates to the port.
 *
 * Returns
 * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	int rpi;
	uint16_t max_rpi, rpi_base, rpi_limit;
	uint16_t rpi_remaining;
	struct lpfc_rpi_hdr *rpi_hdr;

	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
	rpi_limit = phba->sli4_hba.next_rpi;

	/*
	 * The valid rpi range is not guaranteed to be zero-based. Start
	 * the search at the rpi_base as reported by the port.
	 */
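	/* rpi_bmask and the rpi counters below are protected by hbalock */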
	spin_lock_irq(&phba->hbalock);
	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
	if (rpi >= rpi_limit || rpi < rpi_base)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * on available rpis max has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irq(&phba->hbalock);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now. Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
		phba->sli4_hba.rpi_count;
	spin_unlock_irq(&phba->hbalock);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release back to the pool.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	clear_bit(rpi, phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_count--;
	phba->sli4_hba.max_cfg_param.rpi_used--;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the memory region that
 * tracks rpi allocation via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
}

/**
 * lpfc_sli4_resume_rpi - Resume an rpi on the port
 * @ndlp: pointer to the node whose rpi is to be resumed.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command to
 * reactivate the rpi associated with @ndlp on the port.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the rpi resumption via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Prepare and issue the RESUME_RPI command. */
	lpfc_resume_rpi(mboxq, ndlp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @phba: pointer to lpfc hba data structure.
 * @vpi: vpi value to activate with the port.
 *
 * This routine is invoked to activate a vpi with the
 * port when the host intends to use vports with a
 * nonzero vpi.
 *
 * Returns:
 * 	0 success
 * 	negative errno value otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;

	if (vpi == 0)
		return -EINVAL;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2022 INIT VPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
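	/* On MBX_TIMEOUT the completion path still owns the mailbox memory,
	 * so it must not be freed here.
	 */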
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	return retval;
}

/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This is the completion handler for the ADD_FCF_RECORD nonembedded mailbox
 * command. It checks the status reported in the mailbox subheader, logs any
 * failure, and frees the nonembedded mailbox resources.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
	    (shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2558 ADD_FCF_RECORD mailbox failed with "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2523 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0. This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data. The data starts after
	 * the FCoE header plus word10. The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2515 ADD_FCF_RECORD mailbox failed with "
			"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}

/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 *
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
	       LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
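	/* vlan_bitmap has one bit per vlan id: byte vlan_id / 8 holds
	 * bit vlan_id % 8.
	 */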
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}

/**
 * lpfc_sli4_read_fcf_record - Read an FCF record from the port's FCF table.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read the FCF record from the device starting
 * at the given @fcf_index via a nonembedded READ_FCF_TABLE mailbox command.
 **/
int
lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;
	void *virt_addr;
	dma_addr_t phys_addr;
	uint8_t *bytep;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcfscan;
	}

	req_len = sizeof(struct fcf_record) +
		  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);

	/* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE,
				     req_len, LPFC_SLI4_MBX_NEMBED);

	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0291 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		error = -ENOMEM;
		goto fail_fcfscan;
	}

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	virt_addr = mboxq->sge_array->addr[0];
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;

	/* Set up command fields */
	bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
	/* Perform necessary endian conversion */
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
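	/* The FCF record is returned asynchronously and is processed in
	 * the lpfc_mbx_cmpl_read_fcf_record completion handler.
	 */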
	if (rc == MBX_NOT_FINISHED) {
		error = -EIO;
	} else {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag |= FCF_DISC_INPROGRESS;
		spin_unlock_irq(&phba->hbalock);
		error = 0;
	}
fail_fcfscan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for the port state to
 * decide if the user disabled the port. If the TLV indicates the port is
 * disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint8_t *rgn23_data = NULL;
	uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2600 lpfc_sli_read_link_ste failed to"
			" allocate mailbox memory\n");
		goto out;
	}
	mb = &pmb->u.mb;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2601 lpfc_sli_read_link_ste failed to"
				" read config region 23 rc 0x%x Status 0x%x\n",
				rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * Dump mem may return a zero count when finished or when a
		 * mailbox error occurred; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	data_size = offset;
	offset = 0;

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
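	/* TLV record layout: byte 0 is the type and byte 1 the length in
	 * words, so each record occupies length * 4 + 4 bytes.
	 */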
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not a driver-specific TLV or the driver id
		 * is not the Linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}
out:
	if (pmb)
		mempool_free(pmb, phba->mbox_mem_pool);
	kfree(rgn23_data);
	return;
}