be2net: Patch to flash the redboot section during firmware update.
/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"

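/* Notify the h/w that a new WRB has been posted to the MCC queue by
 * ringing the MCC queue doorbell */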
static void be_mcc_notify(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
        if (compl->flags != 0) {
                compl->flags = le32_to_cpu(compl->flags);
                BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
                return true;
        } else {
                return false;
        }
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
        struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;

        /* Just swap the status to host endian; mcc tag is opaquely copied
         * from mcc_wrb */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                CQE_STATUS_COMPL_MASK;
        if (compl_status == MCC_STATUS_SUCCESS) {
                if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
                        struct be_cmd_resp_get_stats *resp =
                                                adapter->stats.cmd.va;
                        be_dws_le_to_cpu(&resp->hw_stats,
                                                sizeof(resp->hw_stats));
                        netdev_stats_update(adapter);
                }
        } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                CQE_STATUS_EXTD_MASK;
                dev_warn(&adapter->pdev->dev,
                        "Error in cmd completion: status(compl/extd)=%d/%d\n",
                        compl_status, extd_status);
        }
        return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
                struct be_async_event_link_state *evt)
{
        be_link_status_update(adapter,
                evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

static inline bool is_link_state_evt(u32 trailer)
{
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                ASYNC_TRAILER_EVENT_CODE_MASK) ==
                                ASYNC_EVENT_CODE_LINK_STATE);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
        struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
                return compl;
        }
        return NULL;
}

int be_process_mcc(struct be_adapter *adapter)
{
        struct be_mcc_compl *compl;
        int num = 0, status = 0;

        spin_lock_bh(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        BUG_ON(!is_link_state_evt(compl->flags));

                        /* Interpret compl as an async link evt */
                        be_async_link_state_process(adapter,
                                (struct be_async_event_link_state *) compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                                status = be_mcc_compl_process(adapter, compl);
                                atomic_dec(&adapter->mcc_obj.q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }

        if (num)
                be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);

        spin_unlock_bh(&adapter->mcc_cq_lock);
        return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout             120000 /* 12s timeout */
        int i, status;
        for (i = 0; i < mcc_timeout; i++) {
                status = be_process_mcc(adapter);
                if (status)
                        return status;

                if (atomic_read(&adapter->mcc_obj.q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout) {
                dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
                return -1;
        }
        return 0;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
        be_mcc_notify(adapter);
        return be_mcc_wait_compl(adapter);
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
        int cnt = 0, wait = 5;
        u32 ready;

        do {
                ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (cnt > 4000000) {
                        dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
                        return -1;
                }

                if (cnt > 50)
                        wait = 200;
                cnt += wait;
                udelay(wait);
        } while (true);

        return 0;
}

/*
 * Inserts the mailbox address into the doorbell in two steps and polls on the
 * mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
        int status;
        u32 val = 0;
        void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val = 0;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(adapter, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status)
                        return status;
        } else {
                dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}

static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
        u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

        *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
        if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
                return -1;
        else
                return 0;
}

int be_cmd_POST(struct be_adapter *adapter)
{
        u16 stage;
        int status, timeout = 0;

        do {
                status = be_POST_stage_get(adapter, &stage);
                if (status) {
                        dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
                                stage);
                        return -1;
                } else if (stage != POST_STAGE_ARMFW_RDY) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(2 * HZ);
                        timeout += 2;
                } else {
                        return 0;
                }
        } while (timeout < 20);

        dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
        return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
        return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
        return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                                bool embedded, u8 sge_cnt)
{
        if (embedded)
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        else
                wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
                                MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        be_dws_cpu_to_le(wrb, 20);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                                u8 subsystem, u8 opcode, int cmd_len)
{
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
                        struct be_dma_mem *mem)
{
        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        u64 dma = (u64)mem->dma;

        for (i = 0; i < buf_pages; i++) {
                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
                dma += PAGE_SIZE_4K;
        }
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE                   651042
        const u32 round = 10;
        u32 multiplier;

        if (usec_delay == 0)
                multiplier = 0;
        else {
                u32 interrupt_rate = 1000000 / usec_delay;
                /* Max delay, corresponding to the lowest interrupt rate */
                if (interrupt_rate == 0)
                        multiplier = 1023;
                else {
                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
                        multiplier /= interrupt_rate;
                        /* Round the multiplier to the closest value. */
                        multiplier = (multiplier + round/2) / round;
                        multiplier = min(multiplier, (u32)1023);
                }
        }
        return multiplier;
}

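/* Returns the mailbox WRB after zeroing it out; caller must hold mbox_lock */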
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_wrb *wrb
                = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}

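/* Returns the next free WRB in the MCC queue after zeroing it out;
 * caller must hold mcc_lock */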
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        struct be_mcc_wrb *wrb;

        BUG_ON(atomic_read(&mccq->used) >= mccq->len);
        wrb = queue_head_node(mccq);
        queue_head_inc(mccq);
        atomic_inc(&mccq->used);
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}

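/* Uses mbox */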
int be_cmd_eq_create(struct be_adapter *adapter,
                struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eq_create *req;
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_EQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, func, req->context,
                        be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        /* 4 byte eqe */
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                        __ilog2_u32(eq->len/256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                        eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses mbox */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                        u8 type, bool permanent, u32 if_handle)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

        req->type = type;
        if (permanent) {
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16) if_handle);
                req->permanent = 0;
        }

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                u32 if_id, u32 *pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_del *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

        req->if_id = cpu_to_le32(if_id);
        req->pmac_id = cpu_to_le32(pmac_id);

        status = be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);

        return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
                struct be_queue_info *cq, struct be_queue_info *eq,
                bool sol_evts, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cq_create *req;
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_CQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
        AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
        AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
                        __ilog2_u32(cq->len/256));
        AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
        AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
        AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);

        return status;
}

static u32 be_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len); /* log2(len) + 1 */
        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}

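/* Uses mbox */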
int be_cmd_mccq_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

        AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
                be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        spin_unlock(&adapter->mbox_lock);

        return status;
}

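/* Uses mbox */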
int be_cmd_txq_create(struct be_adapter *adapter,
                        struct be_queue_info *txq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_tx_create *req;
        struct be_dma_mem *q_mem = &txq->dma_mem;
        void *ctxt;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
                sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;

        AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
                be_encoded_q_len(txq->len));
        AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
                        be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);

        return status;
}

/* Uses mbox */
int be_cmd_rxq_create(struct be_adapter *adapter,
                struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
                u16 max_frame_size, u32 if_id, u32 rss)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_rx_create *req;
        struct be_dma_mem *q_mem = &rxq->dma_mem;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
                sizeof(*req));

        req->cq_id = cpu_to_le16(cq_id);
        req->frag_size = fls(frag_size) - 1;
        req->num_pages = 2;
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
        req->interface_id = cpu_to_le32(if_id);
        req->max_frame_size = cpu_to_le16(max_frame_size);
        req->rss_queue = cpu_to_le32(rss);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
                rxq->id = le16_to_cpu(resp->id);
                rxq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);

        return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                int queue_type)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        u8 subsys = 0, opcode = 0;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                BUG();
        }
        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        req->id = cpu_to_le16(q->id);

        status = be_mbox_notify_wait(adapter);

        spin_unlock(&adapter->mbox_lock);

        return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
                u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_create *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

        req->capability_flags = cpu_to_le32(cap_flags);
        req->enable_flags = cpu_to_le32(en_flags);
        req->pmac_invalid = pmac_invalid;
        if (!pmac_invalid)
                memcpy(req->mac_addr, mac, ETH_ALEN);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
                *if_handle = le32_to_cpu(resp->interface_id);
                if (!pmac_invalid)
                        *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses mbox */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_destroy *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

        req->interface_id = cpu_to_le32(interface_id);

        status = be_mbox_notify_wait(adapter);

        spin_unlock(&adapter->mbox_lock);

        return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_stats *req;
        struct be_sge *sge;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
        wrb->tag0 = OPCODE_ETH_GET_STATISTICS;

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_GET_STATISTICS, sizeof(*req));
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        be_mcc_notify(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return 0;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
                        bool *link_up, u8 *mac_speed, u16 *link_speed)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        *link_up = false;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
                        *link_up = true;
                        *link_speed = le16_to_cpu(resp->link_speed);
                        *mac_speed = resp->mac_speed;
                }
        }

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fw_version *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
                strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Set the EQ delay interval of an EQ to the specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_modify_eq_delay *req;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

        req->num_eq = cpu_to_le32(1);
        req->delay[0].eq_id = cpu_to_le32(eq_id);
        req->delay[0].phase = 0;
        req->delay[0].delay_multiplier = cpu_to_le32(eqd);

        be_mcc_notify(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return 0;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                        u32 num, bool untagged, bool promiscuous)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_vlan_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

        req->interface_id = if_id;
        req->promiscuous = promiscuous;
        req->untagged = untagged;
        req->num_vlan = num;
        if (!promiscuous) {
                memcpy(req->normal_vlan, vtag_array,
                        req->num_vlan * sizeof(vtag_array[0]));
        }

        status = be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_promiscuous_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_PROMISCUOUS, sizeof(*req));

        if (port_num)
                req->port1_promiscuous = en;
        else
                req->port0_promiscuous = en;

        status = be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/*
 * Uses MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
                struct dev_mc_list *mc_list, u32 mc_count)
{
#define BE_MAX_MC               32 /* set mcast promisc if > 32 */
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcast_mac_config *req;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

        req->interface_id = if_id;
        if (mc_list && mc_count <= BE_MAX_MC) {
                int i;
                struct dev_mc_list *mc;

                req->num_mac = cpu_to_le16(mc_count);

                for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
                        memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
        } else {
                req->promiscuous = 1;
        }

        be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);

        return 0;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
        req->rx_flow_control = cpu_to_le16((u16)rx_fc);

        status = be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_flow_control *resp =
                                                embedded_payload(wrb);
                *tx_fc = le16_to_cpu(resp->tx_flow_control);
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
                *cap = le32_to_cpu(resp->function_cap);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

        status = be_mbox_notify_wait(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
                        u8 bcn, u8 sts, u8 state)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_enable_disable_beacon *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

        req->port_num = port_num;
        req->beacon_state = state;
        req->beacon_duration = bcn;
        req->status_duration = sts;

        status = be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_beacon_state *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

        req->port_num = port_num;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_beacon_state *resp =
                                                embedded_payload(wrb);
                *state = resp->beacon_state;
        }

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses sync mcc */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
                                u8 *connector)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_port_type *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));

        req->port = cpu_to_le32(port);
        req->page_num = cpu_to_le32(TR_PAGE_A0);
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
                *connector = resp->data.connector;
        }

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

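/* Uses synchronous MCCQ; cmd is a non-embedded dma buffer holding the
 * flashrom request */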
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req = cmd->va;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, cmd->size, false, 1);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(cmd->size);

        req->params.op_type = cpu_to_le32(flash_type);
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);

        status = be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

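/* Uses synchronous MCCQ; reads the flashed redboot CRC (4 bytes) */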
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

        req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT);
        req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
        req->params.offset = 0x3FFFC;
        req->params.data_buf_size = 0x4;

        status = be_mcc_notify_wait(adapter);
        if (!status)
                memcpy(flashed_crc, req->params.data_buf, 4);

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}