]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (C) 2005 - 2009 ServerEngines | |
3 | * All rights reserved. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or | |
6 | * modify it under the terms of the GNU General Public License version 2 | |
7 | * as published by the Free Software Foundation. The full GNU General | |
8 | * Public License is included in this distribution in the file called COPYING. | |
9 | * | |
10 | * Contact Information: | |
11 | * linux-drivers@serverengines.com | |
12 | * | |
13 | * ServerEngines | |
14 | * 209 N. Fair Oaks Ave | |
15 | * Sunnyvale, CA 94085 | |
16 | */ | |
17 | ||
18 | #include "be.h" | |
19 | #include "be_cmds.h" | |
20 | ||
21 | static void be_mcc_notify(struct be_adapter *adapter) | |
22 | { | |
23 | struct be_queue_info *mccq = &adapter->mcc_obj.q; | |
24 | u32 val = 0; | |
25 | ||
26 | val |= mccq->id & DB_MCCQ_RING_ID_MASK; | |
27 | val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; | |
28 | iowrite32(val, adapter->db + DB_MCCQ_OFFSET); | |
29 | } | |
30 | ||
31 | /* To check if valid bit is set, check the entire word as we don't know | |
32 | * the endianness of the data (old entry is host endian while a new entry is | |
33 | * little endian) */ | |
34 | static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) | |
35 | { | |
36 | if (compl->flags != 0) { | |
37 | compl->flags = le32_to_cpu(compl->flags); | |
38 | BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); | |
39 | return true; | |
40 | } else { | |
41 | return false; | |
42 | } | |
43 | } | |
44 | ||
45 | /* Need to reset the entire word that houses the valid bit */ | |
46 | static inline void be_mcc_compl_use(struct be_mcc_compl *compl) | |
47 | { | |
48 | compl->flags = 0; | |
49 | } | |
50 | ||
51 | static int be_mcc_compl_process(struct be_adapter *adapter, | |
52 | struct be_mcc_compl *compl) | |
53 | { | |
54 | u16 compl_status, extd_status; | |
55 | ||
56 | /* Just swap the status to host endian; mcc tag is opaquely copied | |
57 | * from mcc_wrb */ | |
58 | be_dws_le_to_cpu(compl, 4); | |
59 | ||
60 | compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & | |
61 | CQE_STATUS_COMPL_MASK; | |
62 | if (compl_status == MCC_STATUS_SUCCESS) { | |
63 | if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) { | |
64 | struct be_cmd_resp_get_stats *resp = | |
65 | adapter->stats.cmd.va; | |
66 | be_dws_le_to_cpu(&resp->hw_stats, | |
67 | sizeof(resp->hw_stats)); | |
68 | netdev_stats_update(adapter); | |
69 | } | |
70 | } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) { | |
71 | extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & | |
72 | CQE_STATUS_EXTD_MASK; | |
73 | dev_warn(&adapter->pdev->dev, | |
74 | "Error in cmd completion - opcode %d, compl %d, extd %d\n", | |
75 | compl->tag0, compl_status, extd_status); | |
76 | } | |
77 | return compl_status; | |
78 | } | |
79 | ||
80 | /* Link state evt is a string of bytes; no need for endian swapping */ | |
81 | static void be_async_link_state_process(struct be_adapter *adapter, | |
82 | struct be_async_event_link_state *evt) | |
83 | { | |
84 | be_link_status_update(adapter, | |
85 | evt->port_link_status == ASYNC_EVENT_LINK_UP); | |
86 | } | |
87 | ||
88 | static inline bool is_link_state_evt(u32 trailer) | |
89 | { | |
90 | return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & | |
91 | ASYNC_TRAILER_EVENT_CODE_MASK) == | |
92 | ASYNC_EVENT_CODE_LINK_STATE); | |
93 | } | |
94 | ||
95 | static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) | |
96 | { | |
97 | struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; | |
98 | struct be_mcc_compl *compl = queue_tail_node(mcc_cq); | |
99 | ||
100 | if (be_mcc_compl_is_new(compl)) { | |
101 | queue_tail_inc(mcc_cq); | |
102 | return compl; | |
103 | } | |
104 | return NULL; | |
105 | } | |
106 | ||
107 | int be_process_mcc(struct be_adapter *adapter) | |
108 | { | |
109 | struct be_mcc_compl *compl; | |
110 | int num = 0, status = 0; | |
111 | ||
112 | spin_lock_bh(&adapter->mcc_cq_lock); | |
113 | while ((compl = be_mcc_compl_get(adapter))) { | |
114 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { | |
115 | /* Interpret flags as an async trailer */ | |
116 | BUG_ON(!is_link_state_evt(compl->flags)); | |
117 | ||
118 | /* Interpret compl as a async link evt */ | |
119 | be_async_link_state_process(adapter, | |
120 | (struct be_async_event_link_state *) compl); | |
121 | } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { | |
122 | status = be_mcc_compl_process(adapter, compl); | |
123 | atomic_dec(&adapter->mcc_obj.q.used); | |
124 | } | |
125 | be_mcc_compl_use(compl); | |
126 | num++; | |
127 | } | |
128 | ||
129 | if (num) | |
130 | be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num); | |
131 | ||
132 | spin_unlock_bh(&adapter->mcc_cq_lock); | |
133 | return status; | |
134 | } | |
135 | ||
136 | /* Wait till no more pending mcc requests are present */ | |
137 | static int be_mcc_wait_compl(struct be_adapter *adapter) | |
138 | { | |
139 | #define mcc_timeout 120000 /* 12s timeout */ | |
140 | int i, status; | |
141 | for (i = 0; i < mcc_timeout; i++) { | |
142 | status = be_process_mcc(adapter); | |
143 | if (status) | |
144 | return status; | |
145 | ||
146 | if (atomic_read(&adapter->mcc_obj.q.used) == 0) | |
147 | break; | |
148 | udelay(100); | |
149 | } | |
150 | if (i == mcc_timeout) { | |
151 | dev_err(&adapter->pdev->dev, "mccq poll timed out\n"); | |
152 | return -1; | |
153 | } | |
154 | return 0; | |
155 | } | |
156 | ||
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
163 | ||
164 | static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) | |
165 | { | |
166 | int cnt = 0, wait = 5; | |
167 | u32 ready; | |
168 | ||
169 | do { | |
170 | ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; | |
171 | if (ready) | |
172 | break; | |
173 | ||
174 | if (cnt > 4000000) { | |
175 | dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); | |
176 | return -1; | |
177 | } | |
178 | ||
179 | if (cnt > 50) | |
180 | wait = 200; | |
181 | cnt += wait; | |
182 | udelay(wait); | |
183 | } while (true); | |
184 | ||
185 | return 0; | |
186 | } | |
187 | ||
188 | /* | |
189 | * Insert the mailbox address into the doorbell in two steps | |
190 | * Polls on the mbox doorbell till a command completion (or a timeout) occurs | |
191 | */ | |
192 | static int be_mbox_notify_wait(struct be_adapter *adapter) | |
193 | { | |
194 | int status; | |
195 | u32 val = 0; | |
196 | void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET; | |
197 | struct be_dma_mem *mbox_mem = &adapter->mbox_mem; | |
198 | struct be_mcc_mailbox *mbox = mbox_mem->va; | |
199 | struct be_mcc_compl *compl = &mbox->compl; | |
200 | ||
201 | val |= MPU_MAILBOX_DB_HI_MASK; | |
202 | /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ | |
203 | val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; | |
204 | iowrite32(val, db); | |
205 | ||
206 | /* wait for ready to be set */ | |
207 | status = be_mbox_db_ready_wait(adapter, db); | |
208 | if (status != 0) | |
209 | return status; | |
210 | ||
211 | val = 0; | |
212 | /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ | |
213 | val |= (u32)(mbox_mem->dma >> 4) << 2; | |
214 | iowrite32(val, db); | |
215 | ||
216 | status = be_mbox_db_ready_wait(adapter, db); | |
217 | if (status != 0) | |
218 | return status; | |
219 | ||
220 | /* A cq entry has been made now */ | |
221 | if (be_mcc_compl_is_new(compl)) { | |
222 | status = be_mcc_compl_process(adapter, &mbox->compl); | |
223 | be_mcc_compl_use(compl); | |
224 | if (status) | |
225 | return status; | |
226 | } else { | |
227 | dev_err(&adapter->pdev->dev, "invalid mailbox completion\n"); | |
228 | return -1; | |
229 | } | |
230 | return 0; | |
231 | } | |
232 | ||
233 | static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) | |
234 | { | |
235 | u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); | |
236 | ||
237 | *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; | |
238 | if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) | |
239 | return -1; | |
240 | else | |
241 | return 0; | |
242 | } | |
243 | ||
244 | int be_cmd_POST(struct be_adapter *adapter) | |
245 | { | |
246 | u16 stage; | |
247 | int status, timeout = 0; | |
248 | ||
249 | do { | |
250 | status = be_POST_stage_get(adapter, &stage); | |
251 | if (status) { | |
252 | dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n", | |
253 | stage); | |
254 | return -1; | |
255 | } else if (stage != POST_STAGE_ARMFW_RDY) { | |
256 | set_current_state(TASK_INTERRUPTIBLE); | |
257 | schedule_timeout(2 * HZ); | |
258 | timeout += 2; | |
259 | } else { | |
260 | return 0; | |
261 | } | |
262 | } while (timeout < 20); | |
263 | ||
264 | dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); | |
265 | return -1; | |
266 | } | |
267 | ||
268 | static inline void *embedded_payload(struct be_mcc_wrb *wrb) | |
269 | { | |
270 | return wrb->payload.embedded_payload; | |
271 | } | |
272 | ||
273 | static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) | |
274 | { | |
275 | return &wrb->payload.sgl[0]; | |
276 | } | |
277 | ||
278 | /* Don't touch the hdr after it's prepared */ | |
279 | static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, | |
280 | bool embedded, u8 sge_cnt, u32 opcode) | |
281 | { | |
282 | if (embedded) | |
283 | wrb->embedded |= MCC_WRB_EMBEDDED_MASK; | |
284 | else | |
285 | wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) << | |
286 | MCC_WRB_SGE_CNT_SHIFT; | |
287 | wrb->payload_length = payload_len; | |
288 | wrb->tag0 = opcode; | |
289 | be_dws_cpu_to_le(wrb, 20); | |
290 | } | |
291 | ||
292 | /* Don't touch the hdr after it's prepared */ | |
293 | static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, | |
294 | u8 subsystem, u8 opcode, int cmd_len) | |
295 | { | |
296 | req_hdr->opcode = opcode; | |
297 | req_hdr->subsystem = subsystem; | |
298 | req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); | |
299 | } | |
300 | ||
301 | static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, | |
302 | struct be_dma_mem *mem) | |
303 | { | |
304 | int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); | |
305 | u64 dma = (u64)mem->dma; | |
306 | ||
307 | for (i = 0; i < buf_pages; i++) { | |
308 | pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); | |
309 | pages[i].hi = cpu_to_le32(upper_32_bits(dma)); | |
310 | dma += PAGE_SIZE_4K; | |
311 | } | |
312 | } | |
313 | ||
314 | /* Converts interrupt delay in microseconds to multiplier value */ | |
315 | static u32 eq_delay_to_mult(u32 usec_delay) | |
316 | { | |
317 | #define MAX_INTR_RATE 651042 | |
318 | const u32 round = 10; | |
319 | u32 multiplier; | |
320 | ||
321 | if (usec_delay == 0) | |
322 | multiplier = 0; | |
323 | else { | |
324 | u32 interrupt_rate = 1000000 / usec_delay; | |
325 | /* Max delay, corresponding to the lowest interrupt rate */ | |
326 | if (interrupt_rate == 0) | |
327 | multiplier = 1023; | |
328 | else { | |
329 | multiplier = (MAX_INTR_RATE - interrupt_rate) * round; | |
330 | multiplier /= interrupt_rate; | |
331 | /* Round the multiplier to the closest value.*/ | |
332 | multiplier = (multiplier + round/2) / round; | |
333 | multiplier = min(multiplier, (u32)1023); | |
334 | } | |
335 | } | |
336 | return multiplier; | |
337 | } | |
338 | ||
339 | static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter) | |
340 | { | |
341 | struct be_dma_mem *mbox_mem = &adapter->mbox_mem; | |
342 | struct be_mcc_wrb *wrb | |
343 | = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; | |
344 | memset(wrb, 0, sizeof(*wrb)); | |
345 | return wrb; | |
346 | } | |
347 | ||
348 | static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) | |
349 | { | |
350 | struct be_queue_info *mccq = &adapter->mcc_obj.q; | |
351 | struct be_mcc_wrb *wrb; | |
352 | ||
353 | if (atomic_read(&mccq->used) >= mccq->len) { | |
354 | dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n"); | |
355 | return NULL; | |
356 | } | |
357 | ||
358 | wrb = queue_head_node(mccq); | |
359 | queue_head_inc(mccq); | |
360 | atomic_inc(&mccq->used); | |
361 | memset(wrb, 0, sizeof(*wrb)); | |
362 | return wrb; | |
363 | } | |
364 | ||
365 | /* Tell fw we're about to start firing cmds by writing a | |
366 | * special pattern across the wrb hdr; uses mbox | |
367 | */ | |
368 | int be_cmd_fw_init(struct be_adapter *adapter) | |
369 | { | |
370 | u8 *wrb; | |
371 | int status; | |
372 | ||
373 | spin_lock(&adapter->mbox_lock); | |
374 | ||
375 | wrb = (u8 *)wrb_from_mbox(adapter); | |
376 | *wrb++ = 0xFF; | |
377 | *wrb++ = 0x12; | |
378 | *wrb++ = 0x34; | |
379 | *wrb++ = 0xFF; | |
380 | *wrb++ = 0xFF; | |
381 | *wrb++ = 0x56; | |
382 | *wrb++ = 0x78; | |
383 | *wrb = 0xFF; | |
384 | ||
385 | status = be_mbox_notify_wait(adapter); | |
386 | ||
387 | spin_unlock(&adapter->mbox_lock); | |
388 | return status; | |
389 | } | |
390 | ||
391 | /* Tell fw we're done with firing cmds by writing a | |
392 | * special pattern across the wrb hdr; uses mbox | |
393 | */ | |
394 | int be_cmd_fw_clean(struct be_adapter *adapter) | |
395 | { | |
396 | u8 *wrb; | |
397 | int status; | |
398 | ||
399 | spin_lock(&adapter->mbox_lock); | |
400 | ||
401 | wrb = (u8 *)wrb_from_mbox(adapter); | |
402 | *wrb++ = 0xFF; | |
403 | *wrb++ = 0xAA; | |
404 | *wrb++ = 0xBB; | |
405 | *wrb++ = 0xFF; | |
406 | *wrb++ = 0xFF; | |
407 | *wrb++ = 0xCC; | |
408 | *wrb++ = 0xDD; | |
409 | *wrb = 0xFF; | |
410 | ||
411 | status = be_mbox_notify_wait(adapter); | |
412 | ||
413 | spin_unlock(&adapter->mbox_lock); | |
414 | return status; | |
415 | } | |
416 | int be_cmd_eq_create(struct be_adapter *adapter, | |
417 | struct be_queue_info *eq, int eq_delay) | |
418 | { | |
419 | struct be_mcc_wrb *wrb; | |
420 | struct be_cmd_req_eq_create *req; | |
421 | struct be_dma_mem *q_mem = &eq->dma_mem; | |
422 | int status; | |
423 | ||
424 | spin_lock(&adapter->mbox_lock); | |
425 | ||
426 | wrb = wrb_from_mbox(adapter); | |
427 | req = embedded_payload(wrb); | |
428 | ||
429 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE); | |
430 | ||
431 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
432 | OPCODE_COMMON_EQ_CREATE, sizeof(*req)); | |
433 | ||
434 | req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); | |
435 | ||
436 | AMAP_SET_BITS(struct amap_eq_context, func, req->context, | |
437 | be_pci_func(adapter)); | |
438 | AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); | |
439 | /* 4byte eqe*/ | |
440 | AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); | |
441 | AMAP_SET_BITS(struct amap_eq_context, count, req->context, | |
442 | __ilog2_u32(eq->len/256)); | |
443 | AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, | |
444 | eq_delay_to_mult(eq_delay)); | |
445 | be_dws_cpu_to_le(req->context, sizeof(req->context)); | |
446 | ||
447 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
448 | ||
449 | status = be_mbox_notify_wait(adapter); | |
450 | if (!status) { | |
451 | struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); | |
452 | eq->id = le16_to_cpu(resp->eq_id); | |
453 | eq->created = true; | |
454 | } | |
455 | ||
456 | spin_unlock(&adapter->mbox_lock); | |
457 | return status; | |
458 | } | |
459 | ||
460 | /* Uses mbox */ | |
461 | int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, | |
462 | u8 type, bool permanent, u32 if_handle) | |
463 | { | |
464 | struct be_mcc_wrb *wrb; | |
465 | struct be_cmd_req_mac_query *req; | |
466 | int status; | |
467 | ||
468 | spin_lock(&adapter->mbox_lock); | |
469 | ||
470 | wrb = wrb_from_mbox(adapter); | |
471 | req = embedded_payload(wrb); | |
472 | ||
473 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
474 | OPCODE_COMMON_NTWK_MAC_QUERY); | |
475 | ||
476 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
477 | OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req)); | |
478 | ||
479 | req->type = type; | |
480 | if (permanent) { | |
481 | req->permanent = 1; | |
482 | } else { | |
483 | req->if_id = cpu_to_le16((u16) if_handle); | |
484 | req->permanent = 0; | |
485 | } | |
486 | ||
487 | status = be_mbox_notify_wait(adapter); | |
488 | if (!status) { | |
489 | struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); | |
490 | memcpy(mac_addr, resp->mac.addr, ETH_ALEN); | |
491 | } | |
492 | ||
493 | spin_unlock(&adapter->mbox_lock); | |
494 | return status; | |
495 | } | |
496 | ||
497 | /* Uses synchronous MCCQ */ | |
498 | int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, | |
499 | u32 if_id, u32 *pmac_id) | |
500 | { | |
501 | struct be_mcc_wrb *wrb; | |
502 | struct be_cmd_req_pmac_add *req; | |
503 | int status; | |
504 | ||
505 | spin_lock_bh(&adapter->mcc_lock); | |
506 | ||
507 | wrb = wrb_from_mccq(adapter); | |
508 | if (!wrb) { | |
509 | status = -EBUSY; | |
510 | goto err; | |
511 | } | |
512 | req = embedded_payload(wrb); | |
513 | ||
514 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
515 | OPCODE_COMMON_NTWK_PMAC_ADD); | |
516 | ||
517 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
518 | OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req)); | |
519 | ||
520 | req->if_id = cpu_to_le32(if_id); | |
521 | memcpy(req->mac_address, mac_addr, ETH_ALEN); | |
522 | ||
523 | status = be_mcc_notify_wait(adapter); | |
524 | if (!status) { | |
525 | struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); | |
526 | *pmac_id = le32_to_cpu(resp->pmac_id); | |
527 | } | |
528 | ||
529 | err: | |
530 | spin_unlock_bh(&adapter->mcc_lock); | |
531 | return status; | |
532 | } | |
533 | ||
534 | /* Uses synchronous MCCQ */ | |
535 | int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) | |
536 | { | |
537 | struct be_mcc_wrb *wrb; | |
538 | struct be_cmd_req_pmac_del *req; | |
539 | int status; | |
540 | ||
541 | spin_lock_bh(&adapter->mcc_lock); | |
542 | ||
543 | wrb = wrb_from_mccq(adapter); | |
544 | if (!wrb) { | |
545 | status = -EBUSY; | |
546 | goto err; | |
547 | } | |
548 | req = embedded_payload(wrb); | |
549 | ||
550 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
551 | OPCODE_COMMON_NTWK_PMAC_DEL); | |
552 | ||
553 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
554 | OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req)); | |
555 | ||
556 | req->if_id = cpu_to_le32(if_id); | |
557 | req->pmac_id = cpu_to_le32(pmac_id); | |
558 | ||
559 | status = be_mcc_notify_wait(adapter); | |
560 | ||
561 | err: | |
562 | spin_unlock_bh(&adapter->mcc_lock); | |
563 | return status; | |
564 | } | |
565 | ||
566 | /* Uses Mbox */ | |
567 | int be_cmd_cq_create(struct be_adapter *adapter, | |
568 | struct be_queue_info *cq, struct be_queue_info *eq, | |
569 | bool sol_evts, bool no_delay, int coalesce_wm) | |
570 | { | |
571 | struct be_mcc_wrb *wrb; | |
572 | struct be_cmd_req_cq_create *req; | |
573 | struct be_dma_mem *q_mem = &cq->dma_mem; | |
574 | void *ctxt; | |
575 | int status; | |
576 | ||
577 | spin_lock(&adapter->mbox_lock); | |
578 | ||
579 | wrb = wrb_from_mbox(adapter); | |
580 | req = embedded_payload(wrb); | |
581 | ctxt = &req->context; | |
582 | ||
583 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
584 | OPCODE_COMMON_CQ_CREATE); | |
585 | ||
586 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
587 | OPCODE_COMMON_CQ_CREATE, sizeof(*req)); | |
588 | ||
589 | req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); | |
590 | ||
591 | AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm); | |
592 | AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); | |
593 | AMAP_SET_BITS(struct amap_cq_context, count, ctxt, | |
594 | __ilog2_u32(cq->len/256)); | |
595 | AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); | |
596 | AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); | |
597 | AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); | |
598 | AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); | |
599 | AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); | |
600 | AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter)); | |
601 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | |
602 | ||
603 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
604 | ||
605 | status = be_mbox_notify_wait(adapter); | |
606 | if (!status) { | |
607 | struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); | |
608 | cq->id = le16_to_cpu(resp->cq_id); | |
609 | cq->created = true; | |
610 | } | |
611 | ||
612 | spin_unlock(&adapter->mbox_lock); | |
613 | ||
614 | return status; | |
615 | } | |
616 | ||
617 | static u32 be_encoded_q_len(int q_len) | |
618 | { | |
619 | u32 len_encoded = fls(q_len); /* log2(len) + 1 */ | |
620 | if (len_encoded == 16) | |
621 | len_encoded = 0; | |
622 | return len_encoded; | |
623 | } | |
624 | ||
625 | int be_cmd_mccq_create(struct be_adapter *adapter, | |
626 | struct be_queue_info *mccq, | |
627 | struct be_queue_info *cq) | |
628 | { | |
629 | struct be_mcc_wrb *wrb; | |
630 | struct be_cmd_req_mcc_create *req; | |
631 | struct be_dma_mem *q_mem = &mccq->dma_mem; | |
632 | void *ctxt; | |
633 | int status; | |
634 | ||
635 | spin_lock(&adapter->mbox_lock); | |
636 | ||
637 | wrb = wrb_from_mbox(adapter); | |
638 | req = embedded_payload(wrb); | |
639 | ctxt = &req->context; | |
640 | ||
641 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
642 | OPCODE_COMMON_MCC_CREATE); | |
643 | ||
644 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
645 | OPCODE_COMMON_MCC_CREATE, sizeof(*req)); | |
646 | ||
647 | req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); | |
648 | ||
649 | AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter)); | |
650 | AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); | |
651 | AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, | |
652 | be_encoded_q_len(mccq->len)); | |
653 | AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); | |
654 | ||
655 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | |
656 | ||
657 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
658 | ||
659 | status = be_mbox_notify_wait(adapter); | |
660 | if (!status) { | |
661 | struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); | |
662 | mccq->id = le16_to_cpu(resp->id); | |
663 | mccq->created = true; | |
664 | } | |
665 | spin_unlock(&adapter->mbox_lock); | |
666 | ||
667 | return status; | |
668 | } | |
669 | ||
670 | int be_cmd_txq_create(struct be_adapter *adapter, | |
671 | struct be_queue_info *txq, | |
672 | struct be_queue_info *cq) | |
673 | { | |
674 | struct be_mcc_wrb *wrb; | |
675 | struct be_cmd_req_eth_tx_create *req; | |
676 | struct be_dma_mem *q_mem = &txq->dma_mem; | |
677 | void *ctxt; | |
678 | int status; | |
679 | ||
680 | spin_lock(&adapter->mbox_lock); | |
681 | ||
682 | wrb = wrb_from_mbox(adapter); | |
683 | req = embedded_payload(wrb); | |
684 | ctxt = &req->context; | |
685 | ||
686 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
687 | OPCODE_ETH_TX_CREATE); | |
688 | ||
689 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, | |
690 | sizeof(*req)); | |
691 | ||
692 | req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); | |
693 | req->ulp_num = BE_ULP1_NUM; | |
694 | req->type = BE_ETH_TX_RING_TYPE_STANDARD; | |
695 | ||
696 | AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, | |
697 | be_encoded_q_len(txq->len)); | |
698 | AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt, | |
699 | be_pci_func(adapter)); | |
700 | AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1); | |
701 | AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id); | |
702 | ||
703 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | |
704 | ||
705 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
706 | ||
707 | status = be_mbox_notify_wait(adapter); | |
708 | if (!status) { | |
709 | struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); | |
710 | txq->id = le16_to_cpu(resp->cid); | |
711 | txq->created = true; | |
712 | } | |
713 | ||
714 | spin_unlock(&adapter->mbox_lock); | |
715 | ||
716 | return status; | |
717 | } | |
718 | ||
719 | /* Uses mbox */ | |
720 | int be_cmd_rxq_create(struct be_adapter *adapter, | |
721 | struct be_queue_info *rxq, u16 cq_id, u16 frag_size, | |
722 | u16 max_frame_size, u32 if_id, u32 rss) | |
723 | { | |
724 | struct be_mcc_wrb *wrb; | |
725 | struct be_cmd_req_eth_rx_create *req; | |
726 | struct be_dma_mem *q_mem = &rxq->dma_mem; | |
727 | int status; | |
728 | ||
729 | spin_lock(&adapter->mbox_lock); | |
730 | ||
731 | wrb = wrb_from_mbox(adapter); | |
732 | req = embedded_payload(wrb); | |
733 | ||
734 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
735 | OPCODE_ETH_RX_CREATE); | |
736 | ||
737 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE, | |
738 | sizeof(*req)); | |
739 | ||
740 | req->cq_id = cpu_to_le16(cq_id); | |
741 | req->frag_size = fls(frag_size) - 1; | |
742 | req->num_pages = 2; | |
743 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
744 | req->interface_id = cpu_to_le32(if_id); | |
745 | req->max_frame_size = cpu_to_le16(max_frame_size); | |
746 | req->rss_queue = cpu_to_le32(rss); | |
747 | ||
748 | status = be_mbox_notify_wait(adapter); | |
749 | if (!status) { | |
750 | struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); | |
751 | rxq->id = le16_to_cpu(resp->id); | |
752 | rxq->created = true; | |
753 | } | |
754 | ||
755 | spin_unlock(&adapter->mbox_lock); | |
756 | ||
757 | return status; | |
758 | } | |
759 | ||
760 | /* Generic destroyer function for all types of queues | |
761 | * Uses Mbox | |
762 | */ | |
763 | int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, | |
764 | int queue_type) | |
765 | { | |
766 | struct be_mcc_wrb *wrb; | |
767 | struct be_cmd_req_q_destroy *req; | |
768 | u8 subsys = 0, opcode = 0; | |
769 | int status; | |
770 | ||
771 | spin_lock(&adapter->mbox_lock); | |
772 | ||
773 | wrb = wrb_from_mbox(adapter); | |
774 | req = embedded_payload(wrb); | |
775 | ||
776 | switch (queue_type) { | |
777 | case QTYPE_EQ: | |
778 | subsys = CMD_SUBSYSTEM_COMMON; | |
779 | opcode = OPCODE_COMMON_EQ_DESTROY; | |
780 | break; | |
781 | case QTYPE_CQ: | |
782 | subsys = CMD_SUBSYSTEM_COMMON; | |
783 | opcode = OPCODE_COMMON_CQ_DESTROY; | |
784 | break; | |
785 | case QTYPE_TXQ: | |
786 | subsys = CMD_SUBSYSTEM_ETH; | |
787 | opcode = OPCODE_ETH_TX_DESTROY; | |
788 | break; | |
789 | case QTYPE_RXQ: | |
790 | subsys = CMD_SUBSYSTEM_ETH; | |
791 | opcode = OPCODE_ETH_RX_DESTROY; | |
792 | break; | |
793 | case QTYPE_MCCQ: | |
794 | subsys = CMD_SUBSYSTEM_COMMON; | |
795 | opcode = OPCODE_COMMON_MCC_DESTROY; | |
796 | break; | |
797 | default: | |
798 | BUG(); | |
799 | } | |
800 | ||
801 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode); | |
802 | ||
803 | be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req)); | |
804 | req->id = cpu_to_le16(q->id); | |
805 | ||
806 | status = be_mbox_notify_wait(adapter); | |
807 | ||
808 | spin_unlock(&adapter->mbox_lock); | |
809 | ||
810 | return status; | |
811 | } | |
812 | ||
813 | /* Create an rx filtering policy configuration on an i/f | |
814 | * Uses mbox | |
815 | */ | |
816 | int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, | |
817 | u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id) | |
818 | { | |
819 | struct be_mcc_wrb *wrb; | |
820 | struct be_cmd_req_if_create *req; | |
821 | int status; | |
822 | ||
823 | spin_lock(&adapter->mbox_lock); | |
824 | ||
825 | wrb = wrb_from_mbox(adapter); | |
826 | req = embedded_payload(wrb); | |
827 | ||
828 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
829 | OPCODE_COMMON_NTWK_INTERFACE_CREATE); | |
830 | ||
831 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
832 | OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req)); | |
833 | ||
834 | req->capability_flags = cpu_to_le32(cap_flags); | |
835 | req->enable_flags = cpu_to_le32(en_flags); | |
836 | req->pmac_invalid = pmac_invalid; | |
837 | if (!pmac_invalid) | |
838 | memcpy(req->mac_addr, mac, ETH_ALEN); | |
839 | ||
840 | status = be_mbox_notify_wait(adapter); | |
841 | if (!status) { | |
842 | struct be_cmd_resp_if_create *resp = embedded_payload(wrb); | |
843 | *if_handle = le32_to_cpu(resp->interface_id); | |
844 | if (!pmac_invalid) | |
845 | *pmac_id = le32_to_cpu(resp->pmac_id); | |
846 | } | |
847 | ||
848 | spin_unlock(&adapter->mbox_lock); | |
849 | return status; | |
850 | } | |
851 | ||
852 | /* Uses mbox */ | |
853 | int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) | |
854 | { | |
855 | struct be_mcc_wrb *wrb; | |
856 | struct be_cmd_req_if_destroy *req; | |
857 | int status; | |
858 | ||
859 | spin_lock(&adapter->mbox_lock); | |
860 | ||
861 | wrb = wrb_from_mbox(adapter); | |
862 | req = embedded_payload(wrb); | |
863 | ||
864 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
865 | OPCODE_COMMON_NTWK_INTERFACE_DESTROY); | |
866 | ||
867 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
868 | OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req)); | |
869 | ||
870 | req->interface_id = cpu_to_le32(interface_id); | |
871 | ||
872 | status = be_mbox_notify_wait(adapter); | |
873 | ||
874 | spin_unlock(&adapter->mbox_lock); | |
875 | ||
876 | return status; | |
877 | } | |
878 | ||
879 | /* Get stats is a non embedded command: the request is not embedded inside | |
880 | * WRB but is a separate dma memory block | |
881 | * Uses asynchronous MCC | |
882 | */ | |
883 | int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) | |
884 | { | |
885 | struct be_mcc_wrb *wrb; | |
886 | struct be_cmd_req_get_stats *req; | |
887 | struct be_sge *sge; | |
888 | int status = 0; | |
889 | ||
890 | spin_lock_bh(&adapter->mcc_lock); | |
891 | ||
892 | wrb = wrb_from_mccq(adapter); | |
893 | if (!wrb) { | |
894 | status = -EBUSY; | |
895 | goto err; | |
896 | } | |
897 | req = nonemb_cmd->va; | |
898 | sge = nonembedded_sgl(wrb); | |
899 | ||
900 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, | |
901 | OPCODE_ETH_GET_STATISTICS); | |
902 | ||
903 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, | |
904 | OPCODE_ETH_GET_STATISTICS, sizeof(*req)); | |
905 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); | |
906 | sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); | |
907 | sge->len = cpu_to_le32(nonemb_cmd->size); | |
908 | ||
909 | be_mcc_notify(adapter); | |
910 | ||
911 | err: | |
912 | spin_unlock_bh(&adapter->mcc_lock); | |
913 | return status; | |
914 | } | |
915 | ||
916 | /* Uses synchronous mcc */ | |
917 | int be_cmd_link_status_query(struct be_adapter *adapter, | |
918 | bool *link_up, u8 *mac_speed, u16 *link_speed) | |
919 | { | |
920 | struct be_mcc_wrb *wrb; | |
921 | struct be_cmd_req_link_status *req; | |
922 | int status; | |
923 | ||
924 | spin_lock_bh(&adapter->mcc_lock); | |
925 | ||
926 | wrb = wrb_from_mccq(adapter); | |
927 | if (!wrb) { | |
928 | status = -EBUSY; | |
929 | goto err; | |
930 | } | |
931 | req = embedded_payload(wrb); | |
932 | ||
933 | *link_up = false; | |
934 | ||
935 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
936 | OPCODE_COMMON_NTWK_LINK_STATUS_QUERY); | |
937 | ||
938 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
939 | OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req)); | |
940 | ||
941 | status = be_mcc_notify_wait(adapter); | |
942 | if (!status) { | |
943 | struct be_cmd_resp_link_status *resp = embedded_payload(wrb); | |
944 | if (resp->mac_speed != PHY_LINK_SPEED_ZERO) { | |
945 | *link_up = true; | |
946 | *link_speed = le16_to_cpu(resp->link_speed); | |
947 | *mac_speed = resp->mac_speed; | |
948 | } | |
949 | } | |
950 | ||
951 | err: | |
952 | spin_unlock_bh(&adapter->mcc_lock); | |
953 | return status; | |
954 | } | |
955 | ||
956 | /* Uses Mbox */ | |
957 | int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) | |
958 | { | |
959 | struct be_mcc_wrb *wrb; | |
960 | struct be_cmd_req_get_fw_version *req; | |
961 | int status; | |
962 | ||
963 | spin_lock(&adapter->mbox_lock); | |
964 | ||
965 | wrb = wrb_from_mbox(adapter); | |
966 | req = embedded_payload(wrb); | |
967 | ||
968 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
969 | OPCODE_COMMON_GET_FW_VERSION); | |
970 | ||
971 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
972 | OPCODE_COMMON_GET_FW_VERSION, sizeof(*req)); | |
973 | ||
974 | status = be_mbox_notify_wait(adapter); | |
975 | if (!status) { | |
976 | struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); | |
977 | strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); | |
978 | } | |
979 | ||
980 | spin_unlock(&adapter->mbox_lock); | |
981 | return status; | |
982 | } | |
983 | ||
984 | /* set the EQ delay interval of an EQ to specified value | |
985 | * Uses async mcc | |
986 | */ | |
987 | int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) | |
988 | { | |
989 | struct be_mcc_wrb *wrb; | |
990 | struct be_cmd_req_modify_eq_delay *req; | |
991 | int status = 0; | |
992 | ||
993 | spin_lock_bh(&adapter->mcc_lock); | |
994 | ||
995 | wrb = wrb_from_mccq(adapter); | |
996 | if (!wrb) { | |
997 | status = -EBUSY; | |
998 | goto err; | |
999 | } | |
1000 | req = embedded_payload(wrb); | |
1001 | ||
1002 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
1003 | OPCODE_COMMON_MODIFY_EQ_DELAY); | |
1004 | ||
1005 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1006 | OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); | |
1007 | ||
1008 | req->num_eq = cpu_to_le32(1); | |
1009 | req->delay[0].eq_id = cpu_to_le32(eq_id); | |
1010 | req->delay[0].phase = 0; | |
1011 | req->delay[0].delay_multiplier = cpu_to_le32(eqd); | |
1012 | ||
1013 | be_mcc_notify(adapter); | |
1014 | ||
1015 | err: | |
1016 | spin_unlock_bh(&adapter->mcc_lock); | |
1017 | return status; | |
1018 | } | |
1019 | ||
1020 | /* Uses sycnhronous mcc */ | |
1021 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | |
1022 | u32 num, bool untagged, bool promiscuous) | |
1023 | { | |
1024 | struct be_mcc_wrb *wrb; | |
1025 | struct be_cmd_req_vlan_config *req; | |
1026 | int status; | |
1027 | ||
1028 | spin_lock_bh(&adapter->mcc_lock); | |
1029 | ||
1030 | wrb = wrb_from_mccq(adapter); | |
1031 | if (!wrb) { | |
1032 | status = -EBUSY; | |
1033 | goto err; | |
1034 | } | |
1035 | req = embedded_payload(wrb); | |
1036 | ||
1037 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
1038 | OPCODE_COMMON_NTWK_VLAN_CONFIG); | |
1039 | ||
1040 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1041 | OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req)); | |
1042 | ||
1043 | req->interface_id = if_id; | |
1044 | req->promiscuous = promiscuous; | |
1045 | req->untagged = untagged; | |
1046 | req->num_vlan = num; | |
1047 | if (!promiscuous) { | |
1048 | memcpy(req->normal_vlan, vtag_array, | |
1049 | req->num_vlan * sizeof(vtag_array[0])); | |
1050 | } | |
1051 | ||
1052 | status = be_mcc_notify_wait(adapter); | |
1053 | ||
1054 | err: | |
1055 | spin_unlock_bh(&adapter->mcc_lock); | |
1056 | return status; | |
1057 | } | |
1058 | ||
1059 | /* Uses MCC for this command as it may be called in BH context | |
1060 | * Uses synchronous mcc | |
1061 | */ | |
1062 | int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en) | |
1063 | { | |
1064 | struct be_mcc_wrb *wrb; | |
1065 | struct be_cmd_req_promiscuous_config *req; | |
1066 | int status; | |
1067 | ||
1068 | spin_lock_bh(&adapter->mcc_lock); | |
1069 | ||
1070 | wrb = wrb_from_mccq(adapter); | |
1071 | if (!wrb) { | |
1072 | status = -EBUSY; | |
1073 | goto err; | |
1074 | } | |
1075 | req = embedded_payload(wrb); | |
1076 | ||
1077 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS); | |
1078 | ||
1079 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, | |
1080 | OPCODE_ETH_PROMISCUOUS, sizeof(*req)); | |
1081 | ||
1082 | if (port_num) | |
1083 | req->port1_promiscuous = en; | |
1084 | else | |
1085 | req->port0_promiscuous = en; | |
1086 | ||
1087 | status = be_mcc_notify_wait(adapter); | |
1088 | ||
1089 | err: | |
1090 | spin_unlock_bh(&adapter->mcc_lock); | |
1091 | return status; | |
1092 | } | |
1093 | ||
1094 | /* | |
1095 | * Uses MCC for this command as it may be called in BH context | |
1096 | * (mc == NULL) => multicast promiscous | |
1097 | */ | |
1098 | int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, | |
1099 | struct dev_mc_list *mc_list, u32 mc_count, | |
1100 | struct be_dma_mem *mem) | |
1101 | { | |
1102 | struct be_mcc_wrb *wrb; | |
1103 | struct be_cmd_req_mcast_mac_config *req = mem->va; | |
1104 | struct be_sge *sge; | |
1105 | int status; | |
1106 | ||
1107 | spin_lock_bh(&adapter->mcc_lock); | |
1108 | ||
1109 | wrb = wrb_from_mccq(adapter); | |
1110 | if (!wrb) { | |
1111 | status = -EBUSY; | |
1112 | goto err; | |
1113 | } | |
1114 | sge = nonembedded_sgl(wrb); | |
1115 | memset(req, 0, sizeof(*req)); | |
1116 | ||
1117 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, | |
1118 | OPCODE_COMMON_NTWK_MULTICAST_SET); | |
1119 | sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma)); | |
1120 | sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF); | |
1121 | sge->len = cpu_to_le32(mem->size); | |
1122 | ||
1123 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1124 | OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); | |
1125 | ||
1126 | req->interface_id = if_id; | |
1127 | if (mc_list) { | |
1128 | int i; | |
1129 | struct dev_mc_list *mc; | |
1130 | ||
1131 | req->num_mac = cpu_to_le16(mc_count); | |
1132 | ||
1133 | for (mc = mc_list, i = 0; mc; mc = mc->next, i++) | |
1134 | memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN); | |
1135 | } else { | |
1136 | req->promiscuous = 1; | |
1137 | } | |
1138 | ||
1139 | status = be_mcc_notify_wait(adapter); | |
1140 | ||
1141 | err: | |
1142 | spin_unlock_bh(&adapter->mcc_lock); | |
1143 | return status; | |
1144 | } | |
1145 | ||
1146 | /* Uses synchrounous mcc */ | |
1147 | int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) | |
1148 | { | |
1149 | struct be_mcc_wrb *wrb; | |
1150 | struct be_cmd_req_set_flow_control *req; | |
1151 | int status; | |
1152 | ||
1153 | spin_lock_bh(&adapter->mcc_lock); | |
1154 | ||
1155 | wrb = wrb_from_mccq(adapter); | |
1156 | if (!wrb) { | |
1157 | status = -EBUSY; | |
1158 | goto err; | |
1159 | } | |
1160 | req = embedded_payload(wrb); | |
1161 | ||
1162 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
1163 | OPCODE_COMMON_SET_FLOW_CONTROL); | |
1164 | ||
1165 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1166 | OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req)); | |
1167 | ||
1168 | req->tx_flow_control = cpu_to_le16((u16)tx_fc); | |
1169 | req->rx_flow_control = cpu_to_le16((u16)rx_fc); | |
1170 | ||
1171 | status = be_mcc_notify_wait(adapter); | |
1172 | ||
1173 | err: | |
1174 | spin_unlock_bh(&adapter->mcc_lock); | |
1175 | return status; | |
1176 | } | |
1177 | ||
1178 | /* Uses sycn mcc */ | |
1179 | int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) | |
1180 | { | |
1181 | struct be_mcc_wrb *wrb; | |
1182 | struct be_cmd_req_get_flow_control *req; | |
1183 | int status; | |
1184 | ||
1185 | spin_lock_bh(&adapter->mcc_lock); | |
1186 | ||
1187 | wrb = wrb_from_mccq(adapter); | |
1188 | if (!wrb) { | |
1189 | status = -EBUSY; | |
1190 | goto err; | |
1191 | } | |
1192 | req = embedded_payload(wrb); | |
1193 | ||
1194 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
1195 | OPCODE_COMMON_GET_FLOW_CONTROL); | |
1196 | ||
1197 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1198 | OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req)); | |
1199 | ||
1200 | status = be_mcc_notify_wait(adapter); | |
1201 | if (!status) { | |
1202 | struct be_cmd_resp_get_flow_control *resp = | |
1203 | embedded_payload(wrb); | |
1204 | *tx_fc = le16_to_cpu(resp->tx_flow_control); | |
1205 | *rx_fc = le16_to_cpu(resp->rx_flow_control); | |
1206 | } | |
1207 | ||
1208 | err: | |
1209 | spin_unlock_bh(&adapter->mcc_lock); | |
1210 | return status; | |
1211 | } | |
1212 | ||
1213 | /* Uses mbox */ | |
1214 | int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap) | |
1215 | { | |
1216 | struct be_mcc_wrb *wrb; | |
1217 | struct be_cmd_req_query_fw_cfg *req; | |
1218 | int status; | |
1219 | ||
1220 | spin_lock(&adapter->mbox_lock); | |
1221 | ||
1222 | wrb = wrb_from_mbox(adapter); | |
1223 | req = embedded_payload(wrb); | |
1224 | ||
1225 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
1226 | OPCODE_COMMON_QUERY_FIRMWARE_CONFIG); | |
1227 | ||
1228 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1229 | OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); | |
1230 | ||
1231 | status = be_mbox_notify_wait(adapter); | |
1232 | if (!status) { | |
1233 | struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); | |
1234 | *port_num = le32_to_cpu(resp->phys_port); | |
1235 | *cap = le32_to_cpu(resp->function_cap); | |
1236 | } | |
1237 | ||
1238 | spin_unlock(&adapter->mbox_lock); | |
1239 | return status; | |
1240 | } | |
1241 | ||
1242 | /* Uses mbox */ | |
1243 | int be_cmd_reset_function(struct be_adapter *adapter) | |
1244 | { | |
1245 | struct be_mcc_wrb *wrb; | |
1246 | struct be_cmd_req_hdr *req; | |
1247 | int status; | |
1248 | ||
1249 | spin_lock(&adapter->mbox_lock); | |
1250 | ||
1251 | wrb = wrb_from_mbox(adapter); | |
1252 | req = embedded_payload(wrb); | |
1253 | ||
1254 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
1255 | OPCODE_COMMON_FUNCTION_RESET); | |
1256 | ||
1257 | be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, | |
1258 | OPCODE_COMMON_FUNCTION_RESET, sizeof(*req)); | |
1259 | ||
1260 | status = be_mbox_notify_wait(adapter); | |
1261 | ||
1262 | spin_unlock(&adapter->mbox_lock); | |
1263 | return status; | |
1264 | } | |
1265 | ||
1266 | /* Uses sync mcc */ | |
1267 | int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, | |
1268 | u8 bcn, u8 sts, u8 state) | |
1269 | { | |
1270 | struct be_mcc_wrb *wrb; | |
1271 | struct be_cmd_req_enable_disable_beacon *req; | |
1272 | int status; | |
1273 | ||
1274 | spin_lock_bh(&adapter->mcc_lock); | |
1275 | ||
1276 | wrb = wrb_from_mccq(adapter); | |
1277 | if (!wrb) { | |
1278 | status = -EBUSY; | |
1279 | goto err; | |
1280 | } | |
1281 | req = embedded_payload(wrb); | |
1282 | ||
1283 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
1284 | OPCODE_COMMON_ENABLE_DISABLE_BEACON); | |
1285 | ||
1286 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1287 | OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req)); | |
1288 | ||
1289 | req->port_num = port_num; | |
1290 | req->beacon_state = state; | |
1291 | req->beacon_duration = bcn; | |
1292 | req->status_duration = sts; | |
1293 | ||
1294 | status = be_mcc_notify_wait(adapter); | |
1295 | ||
1296 | err: | |
1297 | spin_unlock_bh(&adapter->mcc_lock); | |
1298 | return status; | |
1299 | } | |
1300 | ||
1301 | /* Uses sync mcc */ | |
1302 | int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) | |
1303 | { | |
1304 | struct be_mcc_wrb *wrb; | |
1305 | struct be_cmd_req_get_beacon_state *req; | |
1306 | int status; | |
1307 | ||
1308 | spin_lock_bh(&adapter->mcc_lock); | |
1309 | ||
1310 | wrb = wrb_from_mccq(adapter); | |
1311 | if (!wrb) { | |
1312 | status = -EBUSY; | |
1313 | goto err; | |
1314 | } | |
1315 | req = embedded_payload(wrb); | |
1316 | ||
1317 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
1318 | OPCODE_COMMON_GET_BEACON_STATE); | |
1319 | ||
1320 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1321 | OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req)); | |
1322 | ||
1323 | req->port_num = port_num; | |
1324 | ||
1325 | status = be_mcc_notify_wait(adapter); | |
1326 | if (!status) { | |
1327 | struct be_cmd_resp_get_beacon_state *resp = | |
1328 | embedded_payload(wrb); | |
1329 | *state = resp->beacon_state; | |
1330 | } | |
1331 | ||
1332 | err: | |
1333 | spin_unlock_bh(&adapter->mcc_lock); | |
1334 | return status; | |
1335 | } | |
1336 | ||
/* Read transceiver data to report the connector type of a port.
 * Uses sync mcc.  On success *connector is set from the response.
 */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
				u8 *connector)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	/* NOTE(review): the wrb is sized by the *response* struct while the
	 * header below uses sizeof(*req) -- presumably the TRANSRECV response
	 * is larger than the request; confirm against be_cmds.h before
	 * "simplifying" this.
	 */
	be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
			OPCODE_COMMON_READ_TRANSRECV_DATA);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));

	/* Request page A0 of the transceiver data for the given port */
	req->port = cpu_to_le32(port);
	req->page_num = cpu_to_le32(TR_PAGE_A0);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
		*connector = resp->data.connector;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1372 | ||
1373 | int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, | |
1374 | u32 flash_type, u32 flash_opcode, u32 buf_size) | |
1375 | { | |
1376 | struct be_mcc_wrb *wrb; | |
1377 | struct be_cmd_write_flashrom *req = cmd->va; | |
1378 | struct be_sge *sge; | |
1379 | int status; | |
1380 | ||
1381 | spin_lock_bh(&adapter->mcc_lock); | |
1382 | ||
1383 | wrb = wrb_from_mccq(adapter); | |
1384 | if (!wrb) { | |
1385 | status = -EBUSY; | |
1386 | goto err; | |
1387 | } | |
1388 | req = cmd->va; | |
1389 | sge = nonembedded_sgl(wrb); | |
1390 | ||
1391 | be_wrb_hdr_prepare(wrb, cmd->size, false, 1, | |
1392 | OPCODE_COMMON_WRITE_FLASHROM); | |
1393 | ||
1394 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1395 | OPCODE_COMMON_WRITE_FLASHROM, cmd->size); | |
1396 | sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma)); | |
1397 | sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF); | |
1398 | sge->len = cpu_to_le32(cmd->size); | |
1399 | ||
1400 | req->params.op_type = cpu_to_le32(flash_type); | |
1401 | req->params.op_code = cpu_to_le32(flash_opcode); | |
1402 | req->params.data_buf_size = cpu_to_le32(buf_size); | |
1403 | ||
1404 | status = be_mcc_notify_wait(adapter); | |
1405 | ||
1406 | err: | |
1407 | spin_unlock_bh(&adapter->mcc_lock); | |
1408 | return status; | |
1409 | } | |
1410 | ||
1411 | int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc) | |
1412 | { | |
1413 | struct be_mcc_wrb *wrb; | |
1414 | struct be_cmd_write_flashrom *req; | |
1415 | int status; | |
1416 | ||
1417 | spin_lock_bh(&adapter->mcc_lock); | |
1418 | ||
1419 | wrb = wrb_from_mccq(adapter); | |
1420 | if (!wrb) { | |
1421 | status = -EBUSY; | |
1422 | goto err; | |
1423 | } | |
1424 | req = embedded_payload(wrb); | |
1425 | ||
1426 | be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0, | |
1427 | OPCODE_COMMON_READ_FLASHROM); | |
1428 | ||
1429 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1430 | OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4); | |
1431 | ||
1432 | req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT); | |
1433 | req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); | |
1434 | req->params.offset = 0x3FFFC; | |
1435 | req->params.data_buf_size = 0x4; | |
1436 | ||
1437 | status = be_mcc_notify_wait(adapter); | |
1438 | if (!status) | |
1439 | memcpy(flashed_crc, req->params.data_buf, 4); | |
1440 | ||
1441 | err: | |
1442 | spin_unlock_bh(&adapter->mcc_lock); | |
1443 | return status; | |
1444 | } | |
1445 | ||
1446 | extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, | |
1447 | struct be_dma_mem *nonemb_cmd) | |
1448 | { | |
1449 | struct be_mcc_wrb *wrb; | |
1450 | struct be_cmd_req_acpi_wol_magic_config *req; | |
1451 | struct be_sge *sge; | |
1452 | int status; | |
1453 | ||
1454 | spin_lock_bh(&adapter->mcc_lock); | |
1455 | ||
1456 | wrb = wrb_from_mccq(adapter); | |
1457 | if (!wrb) { | |
1458 | status = -EBUSY; | |
1459 | goto err; | |
1460 | } | |
1461 | req = nonemb_cmd->va; | |
1462 | sge = nonembedded_sgl(wrb); | |
1463 | ||
1464 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, | |
1465 | OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG); | |
1466 | ||
1467 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, | |
1468 | OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req)); | |
1469 | memcpy(req->magic_mac, mac, ETH_ALEN); | |
1470 | ||
1471 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); | |
1472 | sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); | |
1473 | sge->len = cpu_to_le32(nonemb_cmd->size); | |
1474 | ||
1475 | status = be_mcc_notify_wait(adapter); | |
1476 | ||
1477 | err: | |
1478 | spin_unlock_bh(&adapter->mcc_lock); | |
1479 | return status; | |
1480 | } | |
1481 | ||
1482 | int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |
1483 | u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) | |
1484 | { | |
1485 | struct be_mcc_wrb *wrb; | |
1486 | struct be_cmd_req_loopback_test *req; | |
1487 | int status; | |
1488 | ||
1489 | spin_lock_bh(&adapter->mcc_lock); | |
1490 | ||
1491 | wrb = wrb_from_mccq(adapter); | |
1492 | if (!wrb) { | |
1493 | status = -EBUSY; | |
1494 | goto err; | |
1495 | } | |
1496 | ||
1497 | req = embedded_payload(wrb); | |
1498 | ||
1499 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | |
1500 | OPCODE_LOWLEVEL_LOOPBACK_TEST); | |
1501 | ||
1502 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, | |
1503 | OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); | |
1504 | ||
1505 | req->pattern = cpu_to_le64(pattern); | |
1506 | req->src_port = cpu_to_le32(port_num); | |
1507 | req->dest_port = cpu_to_le32(port_num); | |
1508 | req->pkt_size = cpu_to_le32(pkt_size); | |
1509 | req->num_pkts = cpu_to_le32(num_pkts); | |
1510 | req->loopback_type = cpu_to_le32(loopback_type); | |
1511 | ||
1512 | status = be_mcc_notify_wait(adapter); | |
1513 | if (!status) { | |
1514 | struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); | |
1515 | status = le32_to_cpu(resp->status); | |
1516 | } | |
1517 | ||
1518 | err: | |
1519 | spin_unlock_bh(&adapter->mcc_lock); | |
1520 | return status; | |
1521 | } | |
1522 | ||
1523 | int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, | |
1524 | u32 byte_cnt, struct be_dma_mem *cmd) | |
1525 | { | |
1526 | struct be_mcc_wrb *wrb; | |
1527 | struct be_cmd_req_ddrdma_test *req; | |
1528 | struct be_sge *sge; | |
1529 | int status; | |
1530 | int i, j = 0; | |
1531 | ||
1532 | spin_lock_bh(&adapter->mcc_lock); | |
1533 | ||
1534 | wrb = wrb_from_mccq(adapter); | |
1535 | if (!wrb) { | |
1536 | status = -EBUSY; | |
1537 | goto err; | |
1538 | } | |
1539 | req = cmd->va; | |
1540 | sge = nonembedded_sgl(wrb); | |
1541 | be_wrb_hdr_prepare(wrb, cmd->size, false, 1, | |
1542 | OPCODE_LOWLEVEL_HOST_DDR_DMA); | |
1543 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, | |
1544 | OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size); | |
1545 | ||
1546 | sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma)); | |
1547 | sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF); | |
1548 | sge->len = cpu_to_le32(cmd->size); | |
1549 | ||
1550 | req->pattern = cpu_to_le64(pattern); | |
1551 | req->byte_count = cpu_to_le32(byte_cnt); | |
1552 | for (i = 0; i < byte_cnt; i++) { | |
1553 | req->snd_buff[i] = (u8)(pattern >> (j*8)); | |
1554 | j++; | |
1555 | if (j > 7) | |
1556 | j = 0; | |
1557 | } | |
1558 | ||
1559 | status = be_mcc_notify_wait(adapter); | |
1560 | ||
1561 | if (!status) { | |
1562 | struct be_cmd_resp_ddrdma_test *resp; | |
1563 | resp = cmd->va; | |
1564 | if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || | |
1565 | resp->snd_err) { | |
1566 | status = -1; | |
1567 | } | |
1568 | } | |
1569 | ||
1570 | err: | |
1571 | spin_unlock_bh(&adapter->mcc_lock); | |
1572 | return status; | |
1573 | } | |
1574 | ||
1575 | extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, | |
1576 | struct be_dma_mem *nonemb_cmd) | |
1577 | { | |
1578 | struct be_mcc_wrb *wrb; | |
1579 | struct be_cmd_req_seeprom_read *req; | |
1580 | struct be_sge *sge; | |
1581 | int status; | |
1582 | ||
1583 | spin_lock_bh(&adapter->mcc_lock); | |
1584 | ||
1585 | wrb = wrb_from_mccq(adapter); | |
1586 | req = nonemb_cmd->va; | |
1587 | sge = nonembedded_sgl(wrb); | |
1588 | ||
1589 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, | |
1590 | OPCODE_COMMON_SEEPROM_READ); | |
1591 | ||
1592 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1593 | OPCODE_COMMON_SEEPROM_READ, sizeof(*req)); | |
1594 | ||
1595 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); | |
1596 | sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); | |
1597 | sge->len = cpu_to_le32(nonemb_cmd->size); | |
1598 | ||
1599 | status = be_mcc_notify_wait(adapter); | |
1600 | ||
1601 | spin_unlock_bh(&adapter->mcc_lock); | |
1602 | return status; | |
1603 | } |