/* drivers/net/benet/be_cmds.c — be2net firmware command interface */
1 /*
2  * Copyright (C) 2005 - 2009 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20
21 static void be_mcc_notify(struct be_adapter *adapter)
22 {
23         struct be_queue_info *mccq = &adapter->mcc_obj.q;
24         u32 val = 0;
25
26         val |= mccq->id & DB_MCCQ_RING_ID_MASK;
27         val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28         iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
29 }
30
31 /* To check if valid bit is set, check the entire word as we don't know
32  * the endianness of the data (old entry is host endian while a new entry is
33  * little endian) */
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian).
 * NOTE: when the entry is new this converts compl->flags to host endian in
 * place, so on a big-endian host a second call on the same entry would swap
 * the word back — callers must treat this as a consume-once check. */
static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
{
	if (compl->flags != 0) {
		/* Swap to host endian before testing the valid bit */
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}
44
45 /* Need to reset the entire word that houses the valid bit */
/* Need to reset the entire word that houses the valid bit so that
 * be_mcc_compl_is_new() will not see this entry as new again */
static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
{
	compl->flags = 0;
}
50
51 static int be_mcc_compl_process(struct be_adapter *adapter,
52         struct be_mcc_cq_entry *compl)
53 {
54         u16 compl_status, extd_status;
55
56         /* Just swap the status to host endian; mcc tag is opaquely copied
57          * from mcc_wrb */
58         be_dws_le_to_cpu(compl, 4);
59
60         compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
61                                 CQE_STATUS_COMPL_MASK;
62         if (compl_status != MCC_STATUS_SUCCESS) {
63                 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
64                                 CQE_STATUS_EXTD_MASK;
65                 printk(KERN_WARNING DRV_NAME
66                         " error in cmd completion: status(compl/extd)=%d/%d\n",
67                         compl_status, extd_status);
68                 return -1;
69         }
70         return 0;
71 }
72
73 /* Link state evt is a string of bytes; no need for endian swapping */
74 static void be_async_link_state_process(struct be_adapter *adapter,
75                 struct be_async_event_link_state *evt)
76 {
77         be_link_status_update(adapter,
78                 evt->port_link_status == ASYNC_EVENT_LINK_UP);
79 }
80
81 static inline bool is_link_state_evt(u32 trailer)
82 {
83         return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
84                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
85                                 ASYNC_EVENT_CODE_LINK_STATE);
86 }
87
88 static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_adapter *adapter)
89 {
90         struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
91         struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);
92
93         if (be_mcc_compl_is_new(compl)) {
94                 queue_tail_inc(mcc_cq);
95                 return compl;
96         }
97         return NULL;
98 }
99
/* Drain the MCC completion queue: dispatch async link-state events,
 * process completions of posted wrbs, then re-arm the cq with credit for
 * the consumed entries. Serialized by mcc_cq_lock (bh-safe spinlock). */
void be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_cq_entry *compl;
	int num = 0;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as a async link evt */
			be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
		} else {
			be_mcc_compl_process(adapter, compl);
			/* One previously posted wrb is now done; async
			 * events do not correspond to a posted wrb */
			atomic_dec(&adapter->mcc_obj.q.used);
		}
		/* Clear the valid word so the entry can be reused */
		be_mcc_compl_use(compl);
		num++;
	}
	if (num)
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
	spin_unlock_bh(&adapter->mcc_cq_lock);
}
125
126 /* Wait till no more pending mcc requests are present */
127 static void be_mcc_wait_compl(struct be_adapter *adapter)
128 {
129 #define mcc_timeout             50000 /* 5s timeout */
130         int i;
131         for (i = 0; i < mcc_timeout; i++) {
132                 be_process_mcc(adapter);
133                 if (atomic_read(&adapter->mcc_obj.q.used) == 0)
134                         break;
135                 udelay(100);
136         }
137         if (i == mcc_timeout)
138                 printk(KERN_WARNING DRV_NAME "mcc poll timed out\n");
139 }
140
141 /* Notify MCC requests and wait for completion */
/* Notify hw of newly posted MCC requests and busy-wait for their
 * completion (see be_mcc_wait_compl for the timeout) */
static void be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	be_mcc_wait_compl(adapter);
}
147
148 static int be_mbox_db_ready_wait(void __iomem *db)
149 {
150         int cnt = 0, wait = 5;
151         u32 ready;
152
153         do {
154                 ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
155                 if (ready)
156                         break;
157
158                 if (cnt > 200000) {
159                         printk(KERN_WARNING DRV_NAME
160                                 ": mbox_db poll timed out\n");
161                         return -1;
162                 }
163
164                 if (cnt > 50)
165                         wait = 200;
166                 cnt += wait;
167                 udelay(wait);
168         } while (true);
169
170         return 0;
171 }
172
173 /*
174  * Insert the mailbox address into the doorbell in two steps
175  * Polls on the mbox doorbell till a command completion (or a timeout) occurs
176  */
177 static int be_mbox_db_ring(struct be_adapter *adapter)
178 {
179         int status;
180         u32 val = 0;
181         void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
182         struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
183         struct be_mcc_mailbox *mbox = mbox_mem->va;
184         struct be_mcc_cq_entry *cqe = &mbox->cqe;
185
186         memset(cqe, 0, sizeof(*cqe));
187
188         val &= ~MPU_MAILBOX_DB_RDY_MASK;
189         val |= MPU_MAILBOX_DB_HI_MASK;
190         /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
191         val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
192         iowrite32(val, db);
193
194         /* wait for ready to be set */
195         status = be_mbox_db_ready_wait(db);
196         if (status != 0)
197                 return status;
198
199         val = 0;
200         val &= ~MPU_MAILBOX_DB_RDY_MASK;
201         val &= ~MPU_MAILBOX_DB_HI_MASK;
202         /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
203         val |= (u32)(mbox_mem->dma >> 4) << 2;
204         iowrite32(val, db);
205
206         status = be_mbox_db_ready_wait(db);
207         if (status != 0)
208                 return status;
209
210         /* A cq entry has been made now */
211         if (be_mcc_compl_is_new(cqe)) {
212                 status = be_mcc_compl_process(adapter, &mbox->cqe);
213                 be_mcc_compl_use(cqe);
214                 if (status)
215                         return status;
216         } else {
217                 printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n");
218                 return -1;
219         }
220         return 0;
221 }
222
223 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
224 {
225         u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
226
227         *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
228         if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
229                 return -1;
230         else
231                 return 0;
232 }
233
234 static int be_POST_stage_poll(struct be_adapter *adapter, u16 poll_stage)
235 {
236         u16 stage, cnt, error;
237         for (cnt = 0; cnt < 5000; cnt++) {
238                 error = be_POST_stage_get(adapter, &stage);
239                 if (error)
240                         return -1;
241
242                 if (stage == poll_stage)
243                         break;
244                 udelay(1000);
245         }
246         if (stage != poll_stage)
247                 return -1;
248         return 0;
249 }
250
251
252 int be_cmd_POST(struct be_adapter *adapter)
253 {
254         u16 stage, error;
255
256         error = be_POST_stage_get(adapter, &stage);
257         if (error)
258                 goto err;
259
260         if (stage == POST_STAGE_ARMFW_RDY)
261                 return 0;
262
263         if (stage != POST_STAGE_AWAITING_HOST_RDY)
264                 goto err;
265
266         /* On awaiting host rdy, reset and again poll on awaiting host rdy */
267         iowrite32(POST_STAGE_BE_RESET, adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
268         error = be_POST_stage_poll(adapter, POST_STAGE_AWAITING_HOST_RDY);
269         if (error)
270                 goto err;
271
272         /* Now kickoff POST and poll on armfw ready */
273         iowrite32(POST_STAGE_HOST_RDY, adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
274         error = be_POST_stage_poll(adapter, POST_STAGE_ARMFW_RDY);
275         if (error)
276                 goto err;
277
278         return 0;
279 err:
280         printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage);
281         return -1;
282 }
283
/* Payload area of a wrb whose command is embedded in the wrb itself */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
288
/* First scatter-gather entry of a wrb carrying a non-embedded command */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
293
294 /* Don't touch the hdr after it's prepared */
295 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
296                                 bool embedded, u8 sge_cnt)
297 {
298         if (embedded)
299                 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
300         else
301                 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
302                                 MCC_WRB_SGE_CNT_SHIFT;
303         wrb->payload_length = payload_len;
304         be_dws_cpu_to_le(wrb, 20);
305 }
306
/* Fill in the common command header; request_length excludes the header
 * itself. Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}
315
316 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
317                         struct be_dma_mem *mem)
318 {
319         int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
320         u64 dma = (u64)mem->dma;
321
322         for (i = 0; i < buf_pages; i++) {
323                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
324                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
325                 dma += PAGE_SIZE_4K;
326         }
327 }
328
329 /* Converts interrupt delay in microseconds to multiplier value */
330 static u32 eq_delay_to_mult(u32 usec_delay)
331 {
332 #define MAX_INTR_RATE                   651042
333         const u32 round = 10;
334         u32 multiplier;
335
336         if (usec_delay == 0)
337                 multiplier = 0;
338         else {
339                 u32 interrupt_rate = 1000000 / usec_delay;
340                 /* Max delay, corresponding to the lowest interrupt rate */
341                 if (interrupt_rate == 0)
342                         multiplier = 1023;
343                 else {
344                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
345                         multiplier /= interrupt_rate;
346                         /* Round the multiplier to the closest value.*/
347                         multiplier = (multiplier + round/2) / round;
348                         multiplier = min(multiplier, (u32)1023);
349                 }
350         }
351         return multiplier;
352 }
353
/* The single wrb embedded in the bootstrap mailbox; callers in this file
 * serialize access with mbox_lock */
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
358
359 static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
360 {
361         struct be_mcc_wrb *wrb = NULL;
362         if (atomic_read(&mccq->used) < mccq->len) {
363                 wrb = queue_head_node(mccq);
364                 queue_head_inc(mccq);
365                 atomic_inc(&mccq->used);
366                 memset(wrb, 0, sizeof(*wrb));
367         }
368         return wrb;
369 }
370
/* Create an event queue via the bootstrap mailbox.
 * On success, eq->id and eq->created are filled from the fw response. */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	/* req and resp alias the same embedded payload: the fw overwrites
	 * the request with the response, which is only read after
	 * be_mbox_db_ring() succeeds */
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	/* eq context: owning pci function, ring size and delay multiplier */
	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
			be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);
	return status;
}
411
412 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
413                         u8 type, bool permanent, u32 if_handle)
414 {
415         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
416         struct be_cmd_req_mac_query *req = embedded_payload(wrb);
417         struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
418         int status;
419
420         spin_lock(&adapter->mbox_lock);
421         memset(wrb, 0, sizeof(*wrb));
422
423         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
424
425         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
426                 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
427
428         req->type = type;
429         if (permanent) {
430                 req->permanent = 1;
431         } else {
432                 req->if_id = cpu_to_le16((u16)if_handle);
433                 req->permanent = 0;
434         }
435
436         status = be_mbox_db_ring(adapter);
437         if (!status)
438                 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
439
440         spin_unlock(&adapter->mbox_lock);
441         return status;
442 }
443
/* Program a mac address on interface if_id.
 * On success, *pmac_id receives the fw handle for the programmed entry
 * (needed later for be_cmd_pmac_del). */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		/* response overwrites the request in the embedded payload */
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}
471
472 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
473 {
474         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
475         struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
476         int status;
477
478         spin_lock(&adapter->mbox_lock);
479         memset(wrb, 0, sizeof(*wrb));
480
481         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
482
483         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
484                 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
485
486         req->if_id = cpu_to_le32(if_id);
487         req->pmac_id = cpu_to_le32(pmac_id);
488
489         status = be_mbox_db_ring(adapter);
490         spin_unlock(&adapter->mbox_lock);
491
492         return status;
493 }
494
/* Create a completion queue bound to event queue eq.
 * sol_evts: generate solicited events; no_delay/coalesce_wm: interrupt
 * coalescing knobs. On success cq->id/cq->created are updated. */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	/* resp aliases req's payload; read only after the mbox completes */
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	/* cq context: coalescing, ring size, owning eq and pci function */
	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}
539
540 static u32 be_encoded_q_len(int q_len)
541 {
542         u32 len_encoded = fls(q_len); /* log2(len) + 1 */
543         if (len_encoded == 16)
544                 len_encoded = 0;
545         return len_encoded;
546 }
547
/* Create the MCC queue, bound to completion queue cq.
 * On success mccq->id/mccq->created are filled from the response. */
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	/* NOTE: unlike the eq/cq create cmds, num_pages is not converted
	 * to le16 here — presumably a u8/le-agnostic field; confirm against
	 * struct be_cmd_req_mcc_create */
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	/* mcc context: owning pci function, encoded ring size, bound cq */
	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}
588
589 int be_cmd_txq_create(struct be_adapter *adapter,
590                         struct be_queue_info *txq,
591                         struct be_queue_info *cq)
592 {
593         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
594         struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
595         struct be_dma_mem *q_mem = &txq->dma_mem;
596         void *ctxt = &req->context;
597         int status;
598         u32 len_encoded;
599
600         spin_lock(&adapter->mbox_lock);
601         memset(wrb, 0, sizeof(*wrb));
602
603         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
604
605         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
606                 sizeof(*req));
607
608         req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
609         req->ulp_num = BE_ULP1_NUM;
610         req->type = BE_ETH_TX_RING_TYPE_STANDARD;
611
612         len_encoded = fls(txq->len); /* log2(len) + 1 */
613         if (len_encoded == 16)
614                 len_encoded = 0;
615         AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded);
616         AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
617                         be_pci_func(adapter));
618         AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
619         AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
620
621         be_dws_cpu_to_le(ctxt, sizeof(req->context));
622
623         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
624
625         status = be_mbox_db_ring(adapter);
626         if (!status) {
627                 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
628                 txq->id = le16_to_cpu(resp->cid);
629                 txq->created = true;
630         }
631         spin_unlock(&adapter->mbox_lock);
632
633         return status;
634 }
635
/* Create an ethernet rx queue on interface if_id, feeding cq_id.
 * On success rxq->id/rxq->created are filled from the fw response. */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	/* fls(x) - 1 == log2(x) only for powers of two — assumes frag_size
	 * is a power of 2; TODO confirm with callers */
	req->frag_size = fls(frag_size) - 1;
	/* rx rings are always described by exactly 2 pages here */
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}
671
/* Generic destroyer function for all types of queues
 * queue_type selects the subsystem/opcode pair; returns -1 for an
 * unknown type, otherwise the mailbox status. */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	spin_lock(&adapter->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	/* map the queue type to its destroy opcode */
	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
		status = -1;
		goto err;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_db_ring(adapter);
err:
	spin_unlock(&adapter->mbox_lock);

	return status;
}
721
722 /* Create an rx filtering policy configuration on an i/f */
723 int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
724                 bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
725 {
726         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
727         struct be_cmd_req_if_create *req = embedded_payload(wrb);
728         int status;
729
730         spin_lock(&adapter->mbox_lock);
731         memset(wrb, 0, sizeof(*wrb));
732
733         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
734
735         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
736                 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
737
738         req->capability_flags = cpu_to_le32(flags);
739         req->enable_flags = cpu_to_le32(flags);
740         if (!pmac_invalid)
741                 memcpy(req->mac_addr, mac, ETH_ALEN);
742
743         status = be_mbox_db_ring(adapter);
744         if (!status) {
745                 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
746                 *if_handle = le32_to_cpu(resp->interface_id);
747                 if (!pmac_invalid)
748                         *pmac_id = le32_to_cpu(resp->pmac_id);
749         }
750
751         spin_unlock(&adapter->mbox_lock);
752         return status;
753 }
754
755 int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
756 {
757         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
758         struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
759         int status;
760
761         spin_lock(&adapter->mbox_lock);
762         memset(wrb, 0, sizeof(*wrb));
763
764         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
765
766         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
767                 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
768
769         req->interface_id = cpu_to_le32(interface_id);
770         status = be_mbox_db_ring(adapter);
771
772         spin_unlock(&adapter->mbox_lock);
773
774         return status;
775 }
776
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * (nonemb_cmd must be large enough for both request and response; the
 * response's hw_stats are converted to host endian in place). */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_get_stats *req = nonemb_cmd->va;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	memset(req, 0, sizeof(*req));

	/* non-embedded: one sge pointing at the external dma buffer */
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
		be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}
809
810 int be_cmd_link_status_query(struct be_adapter *adapter,
811                         bool *link_up)
812 {
813         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
814         struct be_cmd_req_link_status *req = embedded_payload(wrb);
815         int status;
816
817         spin_lock(&adapter->mbox_lock);
818
819         *link_up = false;
820         memset(wrb, 0, sizeof(*wrb));
821
822         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
823
824         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
825                 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
826
827         status = be_mbox_db_ring(adapter);
828         if (!status) {
829                 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
830                 if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
831                         *link_up = true;
832         }
833
834         spin_unlock(&adapter->mbox_lock);
835         return status;
836 }
837
838 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
839 {
840         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
841         struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
842         int status;
843
844         spin_lock(&adapter->mbox_lock);
845         memset(wrb, 0, sizeof(*wrb));
846
847         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
848
849         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
850                 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
851
852         status = be_mbox_db_ring(adapter);
853         if (!status) {
854                 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
855                 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
856         }
857
858         spin_unlock(&adapter->mbox_lock);
859         return status;
860 }
861
862 /* set the EQ delay interval of an EQ to specified value */
863 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
864 {
865         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
866         struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
867         int status;
868
869         spin_lock(&adapter->mbox_lock);
870         memset(wrb, 0, sizeof(*wrb));
871
872         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
873
874         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
875                 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
876
877         req->num_eq = cpu_to_le32(1);
878         req->delay[0].eq_id = cpu_to_le32(eq_id);
879         req->delay[0].phase = 0;
880         req->delay[0].delay_multiplier = cpu_to_le32(eqd);
881
882         status = be_mbox_db_ring(adapter);
883
884         spin_unlock(&adapter->mbox_lock);
885         return status;
886 }
887
888 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
889                         u32 num, bool untagged, bool promiscuous)
890 {
891         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
892         struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
893         int status;
894
895         spin_lock(&adapter->mbox_lock);
896         memset(wrb, 0, sizeof(*wrb));
897
898         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
899
900         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
901                 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
902
903         req->interface_id = if_id;
904         req->promiscuous = promiscuous;
905         req->untagged = untagged;
906         req->num_vlan = num;
907         if (!promiscuous) {
908                 memcpy(req->normal_vlan, vtag_array,
909                         req->num_vlan * sizeof(vtag_array[0]));
910         }
911
912         status = be_mbox_db_ring(adapter);
913
914         spin_unlock(&adapter->mbox_lock);
915         return status;
916 }
917
/* Use MCC for this command as it may be called in BH context */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_promiscuous_config *req;

        /* BH-safe lock: this path runs from softirq context */
        spin_lock_bh(&adapter->mcc_lock);

        /* NOTE(review): a full MCC queue is treated as fatal here rather
         * than retried — confirm the queue is sized for worst-case load */
        wrb = wrb_from_mcc(&adapter->mcc_obj.q);
        BUG_ON(!wrb);

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_PROMISCUOUS, sizeof(*req));

        /* Enable/disable promiscuous mode on the selected physical port */
        if (port_num)
                req->port1_promiscuous = en;
        else
                req->port0_promiscuous = en;

        /* Completion status of the MCC command is not checked; the
         * function unconditionally reports success */
        be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return 0;
}
946
/*
 * Use MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
                struct dev_mc_list *mc_list, u32 mc_count)
{
#define BE_MAX_MC               32 /* set mcast promisc if > 32 */
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcast_mac_config *req;

        /* BH-safe lock: this path runs from softirq context */
        spin_lock_bh(&adapter->mcc_lock);

        /* NOTE(review): a full MCC queue is treated as fatal here rather
         * than retried — confirm the queue is sized for worst-case load */
        wrb = wrb_from_mcc(&adapter->mcc_obj.q);
        BUG_ON(!wrb);

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

        req->interface_id = if_id;
        if (mc_list && mc_count <= BE_MAX_MC) {
                int i;
                struct dev_mc_list *mc;

                req->num_mac = cpu_to_le16(mc_count);

                /* Copy each multicast address into the request.
                 * NOTE(review): the loop is bounded by the list, not by
                 * mc_count — assumes the caller keeps them in sync */
                for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
                        memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
        } else {
                /* Too many (or unspecified) addresses: fall back to
                 * multicast-promiscuous mode */
                req->promiscuous = 1;
        }

        /* Completion status of the MCC command is not checked; the
         * function unconditionally reports success */
        be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);

        return 0;
}
989
990 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
991 {
992         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
993         struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
994         int status;
995
996         spin_lock(&adapter->mbox_lock);
997
998         memset(wrb, 0, sizeof(*wrb));
999
1000         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1001
1002         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1003                 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1004
1005         req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1006         req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1007
1008         status = be_mbox_db_ring(adapter);
1009
1010         spin_unlock(&adapter->mbox_lock);
1011         return status;
1012 }
1013
1014 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1015 {
1016         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
1017         struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
1018         int status;
1019
1020         spin_lock(&adapter->mbox_lock);
1021
1022         memset(wrb, 0, sizeof(*wrb));
1023
1024         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1025
1026         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1027                 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
1028
1029         status = be_mbox_db_ring(adapter);
1030         if (!status) {
1031                 struct be_cmd_resp_get_flow_control *resp =
1032                                                 embedded_payload(wrb);
1033                 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1034                 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1035         }
1036
1037         spin_unlock(&adapter->mbox_lock);
1038         return status;
1039 }
1040
1041 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
1042 {
1043         struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
1044         struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
1045         int status;
1046
1047         spin_lock(&adapter->mbox_lock);
1048
1049         memset(wrb, 0, sizeof(*wrb));
1050
1051         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1052
1053         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1054                 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
1055
1056         status = be_mbox_db_ring(adapter);
1057         if (!status) {
1058                 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1059                 *port_num = le32_to_cpu(resp->phys_port);
1060         }
1061
1062         spin_unlock(&adapter->mbox_lock);
1063         return status;
1064 }