drivers/net/benet/be_cmds.c
1 /*
2  * Copyright (C) 2005 - 2009 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20
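/* Ring the MCC queue doorbell: tells the hw that one new WRB has been posted */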
21 static void be_mcc_notify(struct be_adapter *adapter)
22 {
23         struct be_queue_info *mccq = &adapter->mcc_obj.q;
24         u32 val = 0;
25
26         val |= mccq->id & DB_MCCQ_RING_ID_MASK;
27         val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28         iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
29 }
30
31 /* To check if the valid bit is set, check the entire word as we don't know
32  * the endianness of the data (an old entry is host endian while a new entry is
33  * little endian) */
34 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
35 {
36         if (compl->flags != 0) {
37                 compl->flags = le32_to_cpu(compl->flags);
38                 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
39                 return true;
40         } else {
41                 return false;
42         }
43 }
44
45 /* Need to reset the entire word that houses the valid bit */
46 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
47 {
48         compl->flags = 0;
49 }
50
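/* Process one MCC completion: convert the status to host endian, refresh the
 * netdev stats on a GET_STATISTICS completion and warn on command errors */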
51 static int be_mcc_compl_process(struct be_adapter *adapter,
52         struct be_mcc_compl *compl)
53 {
54         u16 compl_status, extd_status;
55
56         /* Just swap the status to host endian; mcc tag is opaquely copied
57          * from mcc_wrb */
58         be_dws_le_to_cpu(compl, 4);
59
60         compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
61                                 CQE_STATUS_COMPL_MASK;
62         if (compl_status == MCC_STATUS_SUCCESS) {
63                 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
64                         struct be_cmd_resp_get_stats *resp =
65                                                 adapter->stats.cmd.va;
66                         be_dws_le_to_cpu(&resp->hw_stats,
67                                                 sizeof(resp->hw_stats));
68                         netdev_stats_update(adapter);
69                 }
70         } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
71                 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
72                                 CQE_STATUS_EXTD_MASK;
73                 dev_warn(&adapter->pdev->dev,
74                         "Error in cmd completion: status(compl/extd)=%d/%d\n",
75                         compl_status, extd_status);
76         }
77         return compl_status;
78 }
79
80 /* Link state evt is a string of bytes; no need for endian swapping */
81 static void be_async_link_state_process(struct be_adapter *adapter,
82                 struct be_async_event_link_state *evt)
83 {
84         be_link_status_update(adapter,
85                 evt->port_link_status == ASYNC_EVENT_LINK_UP);
86 }
87
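/* True if the async event trailer carries the link-state event code */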
88 static inline bool is_link_state_evt(u32 trailer)
89 {
90         return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
91                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
92                                 ASYNC_EVENT_CODE_LINK_STATE);
93 }
94
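/* Return the next new completion on the MCC CQ, or NULL if none is pending */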
95 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
96 {
97         struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
98         struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
99
100         if (be_mcc_compl_is_new(compl)) {
101                 queue_tail_inc(mcc_cq);
102                 return compl;
103         }
104         return NULL;
105 }
106
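/* Drain the MCC CQ: handle async link-state events and command completions,
 * then re-arm the CQ with the number of entries consumed */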
107 int be_process_mcc(struct be_adapter *adapter)
108 {
109         struct be_mcc_compl *compl;
110         int num = 0, status = 0;
111
112         spin_lock_bh(&adapter->mcc_cq_lock);
113         while ((compl = be_mcc_compl_get(adapter))) {
114                 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
115                         /* Interpret flags as an async trailer */
116                         BUG_ON(!is_link_state_evt(compl->flags));
117
118                         /* Interpret compl as an async link evt */
119                         be_async_link_state_process(adapter,
120                                 (struct be_async_event_link_state *) compl);
121                 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
122                         status = be_mcc_compl_process(adapter, compl);
123                         atomic_dec(&adapter->mcc_obj.q.used);
124                 }
125                 be_mcc_compl_use(compl);
126                 num++;
127         }
128
129         if (num)
130                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
131
132         spin_unlock_bh(&adapter->mcc_cq_lock);
133         return status;
134 }
135
136 /* Wait till there are no more pending mcc requests */
137 static int be_mcc_wait_compl(struct be_adapter *adapter)
138 {
139 #define mcc_timeout             120000 /* 12s timeout */
140         int i, status;
141         for (i = 0; i < mcc_timeout; i++) {
142                 status = be_process_mcc(adapter);
143                 if (status)
144                         return status;
145
146                 if (atomic_read(&adapter->mcc_obj.q.used) == 0)
147                         break;
148                 udelay(100);
149         }
150         if (i == mcc_timeout) {
151                 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
152                 return -1;
153         }
154         return 0;
155 }
156
157 /* Notify the hw of posted MCC requests and wait for their completion */
158 static int be_mcc_notify_wait(struct be_adapter *adapter)
159 {
160         be_mcc_notify(adapter);
161         return be_mcc_wait_compl(adapter);
162 }
163
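/* Poll the mbox doorbell till its ready bit is set (or the poll times out) */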
164 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
165 {
166         int cnt = 0, wait = 5;
167         u32 ready;
168
169         do {
170                 ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
171                 if (ready)
172                         break;
173
174                 if (cnt > 4000000) {
175                         dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
176                         return -1;
177                 }
178
179                 if (cnt > 50)
180                         wait = 200;
181                 cnt += wait;
182                 udelay(wait);
183         } while (true);
184
185         return 0;
186 }
187
188 /*
189  * Inserts the mailbox address into the doorbell in two steps and polls
190  * the mbox doorbell till a command completion (or a timeout) occurs
191  */
192 static int be_mbox_notify_wait(struct be_adapter *adapter)
193 {
194         int status;
195         u32 val = 0;
196         void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
197         struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
198         struct be_mcc_mailbox *mbox = mbox_mem->va;
199         struct be_mcc_compl *compl = &mbox->compl;
200
201         val |= MPU_MAILBOX_DB_HI_MASK;
202         /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
203         val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
204         iowrite32(val, db);
205
206         /* wait for ready to be set */
207         status = be_mbox_db_ready_wait(adapter, db);
208         if (status != 0)
209                 return status;
210
211         val = 0;
212         /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
213         val |= (u32)(mbox_mem->dma >> 4) << 2;
214         iowrite32(val, db);
215
216         status = be_mbox_db_ready_wait(adapter, db);
217         if (status != 0)
218                 return status;
219
220         /* A cq entry has been made now */
221         if (be_mcc_compl_is_new(compl)) {
222                 status = be_mcc_compl_process(adapter, &mbox->compl);
223                 be_mcc_compl_use(compl);
224                 if (status)
225                         return status;
226         } else {
227                 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
228                 return -1;
229         }
230         return 0;
231 }
232
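/* Read the POST stage from the EP semaphore register; returns -1 if the
 * error bit is set */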
233 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
234 {
235         u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
236
237         *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
238         if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
239                 return -1;
240         else
241                 return 0;
242 }
243
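/* Poll the POST stage till armfw reports ready, a POST error is seen or
 * roughly 20 seconds elapse */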
244 int be_cmd_POST(struct be_adapter *adapter)
245 {
246         u16 stage;
247         int status, timeout = 0;
248
249         do {
250                 status = be_POST_stage_get(adapter, &stage);
251                 if (status) {
252                         dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
253                                 stage);
254                         return -1;
255                 } else if (stage != POST_STAGE_ARMFW_RDY) {
256                         set_current_state(TASK_INTERRUPTIBLE);
257                         schedule_timeout(2 * HZ);
258                         timeout += 2;
259                 } else {
260                         return 0;
261                 }
262         } while (timeout < 20);
263
264         dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
265         return -1;
266 }
267
268 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
269 {
270         return wrb->payload.embedded_payload;
271 }
272
273 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
274 {
275         return &wrb->payload.sgl[0];
276 }
277
278 /* Don't touch the hdr after it's prepared */
279 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
280                                 bool embedded, u8 sge_cnt)
281 {
282         if (embedded)
283                 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
284         else
285                 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
286                                 MCC_WRB_SGE_CNT_SHIFT;
287         wrb->payload_length = payload_len;
288         be_dws_cpu_to_le(wrb, 20);
289 }
290
291 /* Don't touch the hdr after it's prepared */
292 static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
293                                 u8 subsystem, u8 opcode, int cmd_len)
294 {
295         req_hdr->opcode = opcode;
296         req_hdr->subsystem = subsystem;
297         req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
298 }
299
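/* Fill a command's page-address list with the 4K pages spanned by a dma
 * buffer, up to max_pages entries */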
300 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
301                         struct be_dma_mem *mem)
302 {
303         int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
304         u64 dma = (u64)mem->dma;
305
306         for (i = 0; i < buf_pages; i++) {
307                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
308                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
309                 dma += PAGE_SIZE_4K;
310         }
311 }
312
313 /* Converts an interrupt delay in microseconds to a multiplier value */
314 static u32 eq_delay_to_mult(u32 usec_delay)
315 {
316 #define MAX_INTR_RATE                   651042
317         const u32 round = 10;
318         u32 multiplier;
319
320         if (usec_delay == 0)
321                 multiplier = 0;
322         else {
323                 u32 interrupt_rate = 1000000 / usec_delay;
324                 /* Max delay, corresponding to the lowest interrupt rate */
325                 if (interrupt_rate == 0)
326                         multiplier = 1023;
327                 else {
328                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
329                         multiplier /= interrupt_rate;
330                         /* Round the multiplier to the closest value. */
331                         multiplier = (multiplier + round/2) / round;
332                         multiplier = min(multiplier, (u32)1023);
333                 }
334         }
335         return multiplier;
336 }
337
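/* Return the (zeroed) wrb embedded in the mailbox; used under mbox_lock */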
338 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
339 {
340         struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
341         struct be_mcc_wrb *wrb
342                 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
343         memset(wrb, 0, sizeof(*wrb));
344         return wrb;
345 }
346
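/* Allocate a wrb from the MCC queue; returns NULL if the queue is full.
 * Called with mcc_lock held */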
347 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
348 {
349         struct be_queue_info *mccq = &adapter->mcc_obj.q;
350         struct be_mcc_wrb *wrb;
351
352         if (atomic_read(&mccq->used) >= mccq->len) {
353                 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
354                 return NULL;
355         }
356
357         wrb = queue_head_node(mccq);
358         queue_head_inc(mccq);
359         atomic_inc(&mccq->used);
360         memset(wrb, 0, sizeof(*wrb));
361         return wrb;
362 }
363
364 /* Tell fw we're about to start firing cmds by writing a
365  * special pattern across the wrb hdr; uses mbox
366  */
367 int be_cmd_fw_init(struct be_adapter *adapter)
368 {
369         u8 *wrb;
370         int status;
371
372         spin_lock(&adapter->mbox_lock);
373
374         wrb = (u8 *)wrb_from_mbox(adapter);
375         *wrb++ = 0xFF;
376         *wrb++ = 0x12;
377         *wrb++ = 0x34;
378         *wrb++ = 0xFF;
379         *wrb++ = 0xFF;
380         *wrb++ = 0x56;
381         *wrb++ = 0x78;
382         *wrb = 0xFF;
383
384         status = be_mbox_notify_wait(adapter);
385
386         spin_unlock(&adapter->mbox_lock);
387         return status;
388 }
389
390 /* Tell fw we're done with firing cmds by writing a
391  * special pattern across the wrb hdr; uses mbox
392  */
393 int be_cmd_fw_clean(struct be_adapter *adapter)
394 {
395         u8 *wrb;
396         int status;
397
398         spin_lock(&adapter->mbox_lock);
399
400         wrb = (u8 *)wrb_from_mbox(adapter);
401         *wrb++ = 0xFF;
402         *wrb++ = 0xAA;
403         *wrb++ = 0xBB;
404         *wrb++ = 0xFF;
405         *wrb++ = 0xFF;
406         *wrb++ = 0xCC;
407         *wrb++ = 0xDD;
408         *wrb = 0xFF;
409
410         status = be_mbox_notify_wait(adapter);
411
412         spin_unlock(&adapter->mbox_lock);
413         return status;
414 }
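
/* Uses mbox */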
415 int be_cmd_eq_create(struct be_adapter *adapter,
416                 struct be_queue_info *eq, int eq_delay)
417 {
418         struct be_mcc_wrb *wrb;
419         struct be_cmd_req_eq_create *req;
420         struct be_dma_mem *q_mem = &eq->dma_mem;
421         int status;
422
423         spin_lock(&adapter->mbox_lock);
424
425         wrb = wrb_from_mbox(adapter);
426         req = embedded_payload(wrb);
427
428         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
429
430         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
431                 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
432
433         req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
434
435         AMAP_SET_BITS(struct amap_eq_context, func, req->context,
436                         be_pci_func(adapter));
437         AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
438         /* 4 byte eqe */
439         AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
440         AMAP_SET_BITS(struct amap_eq_context, count, req->context,
441                         __ilog2_u32(eq->len/256));
442         AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
443                         eq_delay_to_mult(eq_delay));
444         be_dws_cpu_to_le(req->context, sizeof(req->context));
445
446         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
447
448         status = be_mbox_notify_wait(adapter);
449         if (!status) {
450                 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
451                 eq->id = le16_to_cpu(resp->eq_id);
452                 eq->created = true;
453         }
454
455         spin_unlock(&adapter->mbox_lock);
456         return status;
457 }
458
459 /* Uses mbox */
460 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
461                         u8 type, bool permanent, u32 if_handle)
462 {
463         struct be_mcc_wrb *wrb;
464         struct be_cmd_req_mac_query *req;
465         int status;
466
467         spin_lock(&adapter->mbox_lock);
468
469         wrb = wrb_from_mbox(adapter);
470         req = embedded_payload(wrb);
471
472         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
473
474         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
475                 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
476
477         req->type = type;
478         if (permanent) {
479                 req->permanent = 1;
480         } else {
481                 req->if_id = cpu_to_le16((u16) if_handle);
482                 req->permanent = 0;
483         }
484
485         status = be_mbox_notify_wait(adapter);
486         if (!status) {
487                 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
488                 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
489         }
490
491         spin_unlock(&adapter->mbox_lock);
492         return status;
493 }
494
495 /* Uses synchronous MCCQ */
496 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
497                 u32 if_id, u32 *pmac_id)
498 {
499         struct be_mcc_wrb *wrb;
500         struct be_cmd_req_pmac_add *req;
501         int status;
502
503         spin_lock_bh(&adapter->mcc_lock);
504
505         wrb = wrb_from_mccq(adapter);
506         if (!wrb) {
507                 status = -EBUSY;
508                 goto err;
509         }
510         req = embedded_payload(wrb);
511
512         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
513
514         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
515                 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
516
517         req->if_id = cpu_to_le32(if_id);
518         memcpy(req->mac_address, mac_addr, ETH_ALEN);
519
520         status = be_mcc_notify_wait(adapter);
521         if (!status) {
522                 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
523                 *pmac_id = le32_to_cpu(resp->pmac_id);
524         }
525
526 err:
527         spin_unlock_bh(&adapter->mcc_lock);
528         return status;
529 }
530
531 /* Uses synchronous MCCQ */
532 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
533 {
534         struct be_mcc_wrb *wrb;
535         struct be_cmd_req_pmac_del *req;
536         int status;
537
538         spin_lock_bh(&adapter->mcc_lock);
539
540         wrb = wrb_from_mccq(adapter);
541         if (!wrb) {
542                 status = -EBUSY;
543                 goto err;
544         }
545         req = embedded_payload(wrb);
546
547         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
548
549         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
550                 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
551
552         req->if_id = cpu_to_le32(if_id);
553         req->pmac_id = cpu_to_le32(pmac_id);
554
555         status = be_mcc_notify_wait(adapter);
556
557 err:
558         spin_unlock_bh(&adapter->mcc_lock);
559         return status;
560 }
561
562 /* Uses Mbox */
563 int be_cmd_cq_create(struct be_adapter *adapter,
564                 struct be_queue_info *cq, struct be_queue_info *eq,
565                 bool sol_evts, bool no_delay, int coalesce_wm)
566 {
567         struct be_mcc_wrb *wrb;
568         struct be_cmd_req_cq_create *req;
569         struct be_dma_mem *q_mem = &cq->dma_mem;
570         void *ctxt;
571         int status;
572
573         spin_lock(&adapter->mbox_lock);
574
575         wrb = wrb_from_mbox(adapter);
576         req = embedded_payload(wrb);
577         ctxt = &req->context;
578
579         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
580
581         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
582                 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
583
584         req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
585
586         AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
587         AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
588         AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
589                         __ilog2_u32(cq->len/256));
590         AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
591         AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
592         AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
593         AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
594         AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
595         AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
596         be_dws_cpu_to_le(ctxt, sizeof(req->context));
597
598         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
599
600         status = be_mbox_notify_wait(adapter);
601         if (!status) {
602                 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
603                 cq->id = le16_to_cpu(resp->cq_id);
604                 cq->created = true;
605         }
606
607         spin_unlock(&adapter->mbox_lock);
608
609         return status;
610 }
611
612 static u32 be_encoded_q_len(int q_len)
613 {
614         u32 len_encoded = fls(q_len); /* log2(len) + 1 */
615         if (len_encoded == 16)
616                 len_encoded = 0;
617         return len_encoded;
618 }
619
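/* Uses mbox */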
620 int be_cmd_mccq_create(struct be_adapter *adapter,
621                         struct be_queue_info *mccq,
622                         struct be_queue_info *cq)
623 {
624         struct be_mcc_wrb *wrb;
625         struct be_cmd_req_mcc_create *req;
626         struct be_dma_mem *q_mem = &mccq->dma_mem;
627         void *ctxt;
628         int status;
629
630         spin_lock(&adapter->mbox_lock);
631
632         wrb = wrb_from_mbox(adapter);
633         req = embedded_payload(wrb);
634         ctxt = &req->context;
635
636         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
637
638         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
639                         OPCODE_COMMON_MCC_CREATE, sizeof(*req));
640
641         req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
642
643         AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
644         AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
645         AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
646                 be_encoded_q_len(mccq->len));
647         AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
648
649         be_dws_cpu_to_le(ctxt, sizeof(req->context));
650
651         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
652
653         status = be_mbox_notify_wait(adapter);
654         if (!status) {
655                 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
656                 mccq->id = le16_to_cpu(resp->id);
657                 mccq->created = true;
658         }
659         spin_unlock(&adapter->mbox_lock);
660
661         return status;
662 }
663
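/* Uses mbox */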
664 int be_cmd_txq_create(struct be_adapter *adapter,
665                         struct be_queue_info *txq,
666                         struct be_queue_info *cq)
667 {
668         struct be_mcc_wrb *wrb;
669         struct be_cmd_req_eth_tx_create *req;
670         struct be_dma_mem *q_mem = &txq->dma_mem;
671         void *ctxt;
672         int status;
673
674         spin_lock(&adapter->mbox_lock);
675
676         wrb = wrb_from_mbox(adapter);
677         req = embedded_payload(wrb);
678         ctxt = &req->context;
679
680         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
681
682         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
683                 sizeof(*req));
684
685         req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
686         req->ulp_num = BE_ULP1_NUM;
687         req->type = BE_ETH_TX_RING_TYPE_STANDARD;
688
689         AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
690                 be_encoded_q_len(txq->len));
691         AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
692                         be_pci_func(adapter));
693         AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
694         AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
695
696         be_dws_cpu_to_le(ctxt, sizeof(req->context));
697
698         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
699
700         status = be_mbox_notify_wait(adapter);
701         if (!status) {
702                 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
703                 txq->id = le16_to_cpu(resp->cid);
704                 txq->created = true;
705         }
706
707         spin_unlock(&adapter->mbox_lock);
708
709         return status;
710 }
711
712 /* Uses mbox */
713 int be_cmd_rxq_create(struct be_adapter *adapter,
714                 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
715                 u16 max_frame_size, u32 if_id, u32 rss)
716 {
717         struct be_mcc_wrb *wrb;
718         struct be_cmd_req_eth_rx_create *req;
719         struct be_dma_mem *q_mem = &rxq->dma_mem;
720         int status;
721
722         spin_lock(&adapter->mbox_lock);
723
724         wrb = wrb_from_mbox(adapter);
725         req = embedded_payload(wrb);
726
727         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
728
729         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
730                 sizeof(*req));
731
732         req->cq_id = cpu_to_le16(cq_id);
733         req->frag_size = fls(frag_size) - 1;
734         req->num_pages = 2;
735         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
736         req->interface_id = cpu_to_le32(if_id);
737         req->max_frame_size = cpu_to_le16(max_frame_size);
738         req->rss_queue = cpu_to_le32(rss);
739
740         status = be_mbox_notify_wait(adapter);
741         if (!status) {
742                 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
743                 rxq->id = le16_to_cpu(resp->id);
744                 rxq->created = true;
745         }
746
747         spin_unlock(&adapter->mbox_lock);
748
749         return status;
750 }
751
752 /* Generic destroyer function for all types of queues
753  * Uses Mbox
754  */
755 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
756                 int queue_type)
757 {
758         struct be_mcc_wrb *wrb;
759         struct be_cmd_req_q_destroy *req;
760         u8 subsys = 0, opcode = 0;
761         int status;
762
763         spin_lock(&adapter->mbox_lock);
764
765         wrb = wrb_from_mbox(adapter);
766         req = embedded_payload(wrb);
767
768         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
769
770         switch (queue_type) {
771         case QTYPE_EQ:
772                 subsys = CMD_SUBSYSTEM_COMMON;
773                 opcode = OPCODE_COMMON_EQ_DESTROY;
774                 break;
775         case QTYPE_CQ:
776                 subsys = CMD_SUBSYSTEM_COMMON;
777                 opcode = OPCODE_COMMON_CQ_DESTROY;
778                 break;
779         case QTYPE_TXQ:
780                 subsys = CMD_SUBSYSTEM_ETH;
781                 opcode = OPCODE_ETH_TX_DESTROY;
782                 break;
783         case QTYPE_RXQ:
784                 subsys = CMD_SUBSYSTEM_ETH;
785                 opcode = OPCODE_ETH_RX_DESTROY;
786                 break;
787         case QTYPE_MCCQ:
788                 subsys = CMD_SUBSYSTEM_COMMON;
789                 opcode = OPCODE_COMMON_MCC_DESTROY;
790                 break;
791         default:
792                 BUG();
793         }
794         be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
795         req->id = cpu_to_le16(q->id);
796
797         status = be_mbox_notify_wait(adapter);
798
799         spin_unlock(&adapter->mbox_lock);
800
801         return status;
802 }
803
804 /* Create an rx filtering policy configuration on an i/f
805  * Uses mbox
806  */
807 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
808                 u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
809 {
810         struct be_mcc_wrb *wrb;
811         struct be_cmd_req_if_create *req;
812         int status;
813
814         spin_lock(&adapter->mbox_lock);
815
816         wrb = wrb_from_mbox(adapter);
817         req = embedded_payload(wrb);
818
819         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
820
821         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
822                 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
823
824         req->capability_flags = cpu_to_le32(cap_flags);
825         req->enable_flags = cpu_to_le32(en_flags);
826         req->pmac_invalid = pmac_invalid;
827         if (!pmac_invalid)
828                 memcpy(req->mac_addr, mac, ETH_ALEN);
829
830         status = be_mbox_notify_wait(adapter);
831         if (!status) {
832                 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
833                 *if_handle = le32_to_cpu(resp->interface_id);
834                 if (!pmac_invalid)
835                         *pmac_id = le32_to_cpu(resp->pmac_id);
836         }
837
838         spin_unlock(&adapter->mbox_lock);
839         return status;
840 }
841
842 /* Uses mbox */
843 int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
844 {
845         struct be_mcc_wrb *wrb;
846         struct be_cmd_req_if_destroy *req;
847         int status;
848
849         spin_lock(&adapter->mbox_lock);
850
851         wrb = wrb_from_mbox(adapter);
852         req = embedded_payload(wrb);
853
854         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
855
856         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
857                 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
858
859         req->interface_id = cpu_to_le32(interface_id);
860
861         status = be_mbox_notify_wait(adapter);
862
863         spin_unlock(&adapter->mbox_lock);
864
865         return status;
866 }
867
868 /* Get stats is a non-embedded command: the request is not embedded inside the
869  * WRB but is a separate dma memory block
870  * Uses asynchronous MCC
871  */
872 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
873 {
874         struct be_mcc_wrb *wrb;
875         struct be_cmd_req_get_stats *req;
876         struct be_sge *sge;
877         int status = 0;
878
879         spin_lock_bh(&adapter->mcc_lock);
880
881         wrb = wrb_from_mccq(adapter);
882         if (!wrb) {
883                 status = -EBUSY;
884                 goto err;
885         }
886         req = nonemb_cmd->va;
887         sge = nonembedded_sgl(wrb);
888
889         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
890         wrb->tag0 = OPCODE_ETH_GET_STATISTICS;
891
892         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
893                 OPCODE_ETH_GET_STATISTICS, sizeof(*req));
894         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
895         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
896         sge->len = cpu_to_le32(nonemb_cmd->size);
897
898         be_mcc_notify(adapter);
899
900 err:
901         spin_unlock_bh(&adapter->mcc_lock);
902         return status;
903 }
904
905 /* Uses synchronous mcc */
906 int be_cmd_link_status_query(struct be_adapter *adapter,
907                         bool *link_up, u8 *mac_speed, u16 *link_speed)
908 {
909         struct be_mcc_wrb *wrb;
910         struct be_cmd_req_link_status *req;
911         int status;
912
913         spin_lock_bh(&adapter->mcc_lock);
914
915         wrb = wrb_from_mccq(adapter);
916         if (!wrb) {
917                 status = -EBUSY;
918                 goto err;
919         }
920         req = embedded_payload(wrb);
921
922         *link_up = false;
923
924         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
925
926         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
927                 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
928
929         status = be_mcc_notify_wait(adapter);
930         if (!status) {
931                 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
932                 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
933                         *link_up = true;
934                         *link_speed = le16_to_cpu(resp->link_speed);
935                         *mac_speed = resp->mac_speed;
936                 }
937         }
938
939 err:
940         spin_unlock_bh(&adapter->mcc_lock);
941         return status;
942 }
943
944 /* Uses Mbox */
945 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
946 {
947         struct be_mcc_wrb *wrb;
948         struct be_cmd_req_get_fw_version *req;
949         int status;
950
951         spin_lock(&adapter->mbox_lock);
952
953         wrb = wrb_from_mbox(adapter);
954         req = embedded_payload(wrb);
955
956         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
957
958         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
959                 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
960
961         status = be_mbox_notify_wait(adapter);
962         if (!status) {
963                 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
964                 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
965         }
966
967         spin_unlock(&adapter->mbox_lock);
968         return status;
969 }
970
971 /* Set the delay interval of an EQ to the specified value
972  * Uses async mcc
973  */
974 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
975 {
976         struct be_mcc_wrb *wrb;
977         struct be_cmd_req_modify_eq_delay *req;
978         int status = 0;
979
980         spin_lock_bh(&adapter->mcc_lock);
981
982         wrb = wrb_from_mccq(adapter);
983         if (!wrb) {
984                 status = -EBUSY;
985                 goto err;
986         }
987         req = embedded_payload(wrb);
988
989         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
990
991         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
992                 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
993
994         req->num_eq = cpu_to_le32(1);
995         req->delay[0].eq_id = cpu_to_le32(eq_id);
996         req->delay[0].phase = 0;
997         req->delay[0].delay_multiplier = cpu_to_le32(eqd);
998
999         be_mcc_notify(adapter);
1000
1001 err:
1002         spin_unlock_bh(&adapter->mcc_lock);
1003         return status;
1004 }
1005
1006 /* Uses synchronous mcc */
1007 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1008                         u32 num, bool untagged, bool promiscuous)
1009 {
1010         struct be_mcc_wrb *wrb;
1011         struct be_cmd_req_vlan_config *req;
1012         int status;
1013
1014         spin_lock_bh(&adapter->mcc_lock);
1015
1016         wrb = wrb_from_mccq(adapter);
1017         if (!wrb) {
1018                 status = -EBUSY;
1019                 goto err;
1020         }
1021         req = embedded_payload(wrb);
1022
1023         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1024
1025         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1026                 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
1027
1028         req->interface_id = if_id;
1029         req->promiscuous = promiscuous;
1030         req->untagged = untagged;
1031         req->num_vlan = num;
1032         if (!promiscuous) {
1033                 memcpy(req->normal_vlan, vtag_array,
1034                         req->num_vlan * sizeof(vtag_array[0]));
1035         }
1036
1037         status = be_mcc_notify_wait(adapter);
1038
1039 err:
1040         spin_unlock_bh(&adapter->mcc_lock);
1041         return status;
1042 }
1043
1044 /* Uses MCC for this command as it may be called in BH context
1045  * Uses synchronous mcc
1046  */
1047 int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
1048 {
1049         struct be_mcc_wrb *wrb;
1050         struct be_cmd_req_promiscuous_config *req;
1051         int status;
1052
1053         spin_lock_bh(&adapter->mcc_lock);
1054
1055         wrb = wrb_from_mccq(adapter);
1056         if (!wrb) {
1057                 status = -EBUSY;
1058                 goto err;
1059         }
1060         req = embedded_payload(wrb);
1061
1062         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1063
1064         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1065                 OPCODE_ETH_PROMISCUOUS, sizeof(*req));
1066
1067         if (port_num)
1068                 req->port1_promiscuous = en;
1069         else
1070                 req->port0_promiscuous = en;
1071
1072         status = be_mcc_notify_wait(adapter);
1073
1074 err:
1075         spin_unlock_bh(&adapter->mcc_lock);
1076         return status;
1077 }
1078
1079 /*
1080  * Uses MCC for this command as it may be called in BH context
1081  * (mc_list == NULL) => multicast promiscuous
1082  */
1083 int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1084                 struct dev_mc_list *mc_list, u32 mc_count,
1085                 struct be_dma_mem *mem)
1086 {
1087         struct be_mcc_wrb *wrb;
1088         struct be_cmd_req_mcast_mac_config *req = mem->va;
1089         struct be_sge *sge;
1090         int status;
1091
1092         spin_lock_bh(&adapter->mcc_lock);
1093
1094         wrb = wrb_from_mccq(adapter);
1095         if (!wrb) {
1096                 status = -EBUSY;
1097                 goto err;
1098         }
1099         sge = nonembedded_sgl(wrb);
1100         memset(req, 0, sizeof(*req));
1101
1102         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
1103         sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
1104         sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
1105         sge->len = cpu_to_le32(mem->size);
1106
1107         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1108                 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
1109
1110         req->interface_id = if_id;
1111         if (mc_list) {
1112                 int i;
1113                 struct dev_mc_list *mc;
1114
1115                 req->num_mac = cpu_to_le16(mc_count);
1116
1117                 for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
1118                         memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
1119         } else {
1120                 req->promiscuous = 1;
1121         }
1122
1123         status = be_mcc_notify_wait(adapter);
1124
1125 err:
1126         spin_unlock_bh(&adapter->mcc_lock);
1127         return status;
1128 }
1129
1130 /* Uses synchronous mcc */
1131 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1132 {
1133         struct be_mcc_wrb *wrb;
1134         struct be_cmd_req_set_flow_control *req;
1135         int status;
1136
1137         spin_lock_bh(&adapter->mcc_lock);
1138
1139         wrb = wrb_from_mccq(adapter);
1140         if (!wrb) {
1141                 status = -EBUSY;
1142                 goto err;
1143         }
1144         req = embedded_payload(wrb);
1145
1146         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1147
1148         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1149                 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1150
1151         req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1152         req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1153
1154         status = be_mcc_notify_wait(adapter);
1155
1156 err:
1157         spin_unlock_bh(&adapter->mcc_lock);
1158         return status;
1159 }
1160
1161 /* Uses sync mcc */
1162 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1163 {
1164         struct be_mcc_wrb *wrb;
1165         struct be_cmd_req_get_flow_control *req;
1166         int status;
1167
1168         spin_lock_bh(&adapter->mcc_lock);
1169
1170         wrb = wrb_from_mccq(adapter);
1171         if (!wrb) {
1172                 status = -EBUSY;
1173                 goto err;
1174         }
1175         req = embedded_payload(wrb);
1176
1177         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1178
1179         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1180                 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
1181
1182         status = be_mcc_notify_wait(adapter);
1183         if (!status) {
1184                 struct be_cmd_resp_get_flow_control *resp =
1185                                                 embedded_payload(wrb);
1186                 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1187                 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1188         }
1189
1190 err:
1191         spin_unlock_bh(&adapter->mcc_lock);
1192         return status;
1193 }
1194
1195 /* Uses mbox */
1196 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
1197 {
1198         struct be_mcc_wrb *wrb;
1199         struct be_cmd_req_query_fw_cfg *req;
1200         int status;
1201
1202         spin_lock(&adapter->mbox_lock);
1203
1204         wrb = wrb_from_mbox(adapter);
1205         req = embedded_payload(wrb);
1206
1207         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1208
1209         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1210                 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
1211
1212         status = be_mbox_notify_wait(adapter);
1213         if (!status) {
1214                 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1215                 *port_num = le32_to_cpu(resp->phys_port);
1216                 *cap = le32_to_cpu(resp->function_cap);
1217         }
1218
1219         spin_unlock(&adapter->mbox_lock);
1220         return status;
1221 }
1222
1223 /* Uses mbox */
1224 int be_cmd_reset_function(struct be_adapter *adapter)
1225 {
1226         struct be_mcc_wrb *wrb;
1227         struct be_cmd_req_hdr *req;
1228         int status;
1229
1230         spin_lock(&adapter->mbox_lock);
1231
1232         wrb = wrb_from_mbox(adapter);
1233         req = embedded_payload(wrb);
1234
1235         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1236
1237         be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1238                 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1239
1240         status = be_mbox_notify_wait(adapter);
1241
1242         spin_unlock(&adapter->mbox_lock);
1243         return status;
1244 }
1245
1246 /* Uses sync mcc */
1247 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1248                         u8 bcn, u8 sts, u8 state)
1249 {
1250         struct be_mcc_wrb *wrb;
1251         struct be_cmd_req_enable_disable_beacon *req;
1252         int status;
1253
1254         spin_lock_bh(&adapter->mcc_lock);
1255
1256         wrb = wrb_from_mccq(adapter);
1257         if (!wrb) {
1258                 status = -EBUSY;
1259                 goto err;
1260         }
1261         req = embedded_payload(wrb);
1262
1263         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1264
1265         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1266                 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
1267
1268         req->port_num = port_num;
1269         req->beacon_state = state;
1270         req->beacon_duration = bcn;
1271         req->status_duration = sts;
1272
1273         status = be_mcc_notify_wait(adapter);
1274
1275 err:
1276         spin_unlock_bh(&adapter->mcc_lock);
1277         return status;
1278 }
1279
1280 /* Uses sync mcc */
1281 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1282 {
1283         struct be_mcc_wrb *wrb;
1284         struct be_cmd_req_get_beacon_state *req;
1285         int status;
1286
1287         spin_lock_bh(&adapter->mcc_lock);
1288
1289         wrb = wrb_from_mccq(adapter);
1290         if (!wrb) {
1291                 status = -EBUSY;
1292                 goto err;
1293         }
1294         req = embedded_payload(wrb);
1295
1296         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1297
1298         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1299                 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
1300
1301         req->port_num = port_num;
1302
1303         status = be_mcc_notify_wait(adapter);
1304         if (!status) {
1305                 struct be_cmd_resp_get_beacon_state *resp =
1306                                                 embedded_payload(wrb);
1307                 *state = resp->beacon_state;
1308         }
1309
1310 err:
1311         spin_unlock_bh(&adapter->mcc_lock);
1312         return status;
1313 }
1314
1315 /* Uses sync mcc */
1316 int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
1317                                 u8 *connector)
1318 {
1319         struct be_mcc_wrb *wrb;
1320         struct be_cmd_req_port_type *req;
1321         int status;
1322
1323         spin_lock_bh(&adapter->mcc_lock);
1324
1325         wrb = wrb_from_mccq(adapter);
1326         if (!wrb) {
1327                 status = -EBUSY;
1328                 goto err;
1329         }
1330         req = embedded_payload(wrb);
1331
1332         be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0);
1333
1334         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1335                 OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
1336
1337         req->port = cpu_to_le32(port);
1338         req->page_num = cpu_to_le32(TR_PAGE_A0);
1339         status = be_mcc_notify_wait(adapter);
1340         if (!status) {
1341                 struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
1342                 *connector = resp->data.connector;
1343         }
1344
1345 err:
1346         spin_unlock_bh(&adapter->mcc_lock);
1347         return status;
1348 }
1349
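/* Uses synchronous mcc; the flash data is passed as a non-embedded dma buffer */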
1350 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1351                         u32 flash_type, u32 flash_opcode, u32 buf_size)
1352 {
1353         struct be_mcc_wrb *wrb;
1354         struct be_cmd_write_flashrom *req = cmd->va;
1355         struct be_sge *sge;
1356         int status;
1357
1358         spin_lock_bh(&adapter->mcc_lock);
1359
1360         wrb = wrb_from_mccq(adapter);
1361         if (!wrb) {
1362                 status = -EBUSY;
1363                 goto err;
1364         }
1365         req = cmd->va;
1366         sge = nonembedded_sgl(wrb);
1367
1368         be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
1369
1370         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1371                 OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
1372         sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1373         sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1374         sge->len = cpu_to_le32(cmd->size);
1375
1376         req->params.op_type = cpu_to_le32(flash_type);
1377         req->params.op_code = cpu_to_le32(flash_opcode);
1378         req->params.data_buf_size = cpu_to_le32(buf_size);
1379
1380         status = be_mcc_notify_wait(adapter);
1381
1382 err:
1383         spin_unlock_bh(&adapter->mcc_lock);
1384         return status;
1385 }
1386
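/* Uses synchronous mcc; reads the 4-byte redboot CRC at flash offset 0x3FFFC */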
1387 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
1388 {
1389         struct be_mcc_wrb *wrb;
1390         struct be_cmd_write_flashrom *req;
1391         int status;
1392
1393         spin_lock_bh(&adapter->mcc_lock);
1394
1395         wrb = wrb_from_mccq(adapter);
1396         if (!wrb) {
1397                 status = -EBUSY;
1398                 goto err;
1399         }
1400         req = embedded_payload(wrb);
1401
1402         be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0);
1403
1404         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1405                 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
1406
1407         req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT);
1408         req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1409         req->params.offset = 0x3FFFC;
1410         req->params.data_buf_size = 0x4;
1411
1412         status = be_mcc_notify_wait(adapter);
1413         if (!status)
1414                 memcpy(flashed_crc, req->params.data_buf, 4);
1415
1416 err:
1417         spin_unlock_bh(&adapter->mcc_lock);
1418         return status;
1419 }