/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linuxver.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include <bcmdevs.h>

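/*
 * On a BCM47162 rev 0 with the MIPS 74K as the current core, the core's
 * DMP (wrapper) registers are not accessed; ai_flag() and the
 * cflags/sflags accessors below test for this case and bail out early.
 */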
#define BCM47162_DMP() ((CHIPID(sih->chip) == BCM47162_CHIP_ID) && \
			(CHIPREV(sih->chiprev) == 0) && \
			(sii->coreid[sii->curidx] == MIPS74K_CORE_ID))

/* EROM parsing */

static u32
get_erom_ent(si_t *sih, u32 **eromptr, u32 mask, u32 match)
{
	u32 ent;
	uint inv = 0, nom = 0;

	while (true) {
		ent = R_REG(si_osh(sih), *eromptr);
		(*eromptr)++;

		if (mask == 0)
			break;

		if ((ent & ER_VALID) == 0) {
			inv++;
			continue;
		}

		if (ent == (ER_END | ER_VALID))
			break;

		if ((ent & mask) == match)
			break;

		nom++;
	}

	SI_VMSG(("%s: Returning ent 0x%08x\n", __func__, ent));
	if (inv + nom) {
		SI_VMSG((" after %d invalid and %d non-matching entries\n",
			 inv, nom));
	}
	return ent;
}

static u32
get_asd(si_t *sih, u32 **eromptr, uint sp, uint ad, uint st,
	u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
{
	u32 asd, sz, szd;

	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
	if (((asd & ER_TAG1) != ER_ADD) ||
	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
	    ((asd & AD_ST_MASK) != st)) {
		/* This is not what we want, "push" it back */
		(*eromptr)--;
		return 0;
	}
	*addrl = asd & AD_ADDR_MASK;
	if (asd & AD_AG32)
		*addrh = get_erom_ent(sih, eromptr, 0, 0);
	else
		*addrh = 0;
	*sizeh = 0;
	sz = asd & AD_SZ_MASK;
	if (sz == AD_SZ_SZD) {
		szd = get_erom_ent(sih, eromptr, 0, 0);
		*sizel = szd & SD_SZ_MASK;
		if (szd & SD_SG32)
			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
	} else
		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);

	SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
		 sp, ad, st, *sizeh, *sizel, *addrh, *addrl));

	return asd;
}

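/*
 * Hardware fixup hook: currently a no-op, invoked from ai_scan() once the
 * EROM END marker has been reached.
 */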
static void ai_hwfixup(si_info_t *sii)
{
}

/* parse the enumeration rom to identify all cores */
void ai_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii = SI_INFO(sih);
	chipcregs_t *cc = (chipcregs_t *) regs;
	u32 erombase, *eromptr, *eromlim;

	erombase = R_REG(sii->osh, &cc->eromptr);

	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		eromptr = (u32 *) REG_MAP(erombase, SI_CORE_SIZE);
		break;

	case PCI_BUS:
		/* Set wrappers address */
		sii->curwrap = (void *)((uintptr) regs + SI_CORE_SIZE);

		/* Now point the window at the erom */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
		eromptr = regs;
		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
#endif	/* BCMSDIO */
		eromptr = (u32 *) (uintptr) erombase;
		break;

	default:
		SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n",
			  sih->bustype));
		ASSERT(0);
		return;
	}
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));

	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, "
		 "eromlim = 0x%p\n", regs, erombase, eromptr, eromlim));
	while (eromptr < eromlim) {
		u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
		u32 mpd, asd, addrl, addrh, sizel, sizeh;
		u32 *base;
		uint i, j, idx;
		bool br;

		br = false;

		/* Grok a component */
		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
		if (cia == (ER_END | ER_VALID)) {
			SI_VMSG(("Found END of erom after %d cores\n",
				 sii->numcores));
			ai_hwfixup(sii);
			return;
		}
		base = eromptr - 1;
		cib = get_erom_ent(sih, &eromptr, 0, 0);

		if ((cib & ER_TAG) != ER_CI) {
			SI_ERROR(("CIA not followed by CIB\n"));
			goto error;
		}

		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr "
			 "0x%p, with nmw = %d, nsw = %d, nmp = %d & nsp = %d\n",
			 mfg, cid, crev, base, nmw, nsw, nmp, nsp));

		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
			continue;
		if ((nmw + nsw) == 0) {
			/* A component which is not a core */
			if (cid == OOB_ROUTER_CORE_ID) {
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
					      &addrl, &addrh, &sizel, &sizeh);
				if (asd != 0)
					sii->oob_router = addrl;
			}
			continue;
		}

		idx = sii->numcores;
		/* sii->eromptr[idx] = base; */
		sii->cia[idx] = cia;
		sii->cib[idx] = cib;
		sii->coreid[idx] = cid;

		for (i = 0; i < nmp; i++) {
			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
			if ((mpd & ER_TAG) != ER_MP) {
				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
				goto error;
			}
			SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
				 (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
				 (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
		}

		/* First Slave Address Descriptor should be port 0:
		 * the main register space for the core
		 */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
			      &sizel, &sizeh);
		if (asd == 0) {
			/* Try again to see if it is a bridge */
			asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
				      &addrh, &sizel, &sizeh);
			if (asd != 0)
				br = true;
			else if ((addrh != 0) || (sizeh != 0)
				 || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("First Slave ASD for core 0x%04x "
					  "malformed (0x%08x)\n", cid, asd));
				goto error;
			}
		}
		sii->coresba[idx] = addrl;
		sii->coresba_size[idx] = sizel;
		/* Get any more ASDs in port 0 */
		j = 1;
		do {
			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
				      &addrh, &sizel, &sizeh);
			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
				sii->coresba2[idx] = addrl;
				sii->coresba2_size[idx] = sizel;
			}
			j++;
		} while (asd != 0);

		/* Go through the ASDs for other slave ports */
		for (i = 1; i < nsp; i++) {
			j = 0;
			do {
				asd = get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
					      &addrl, &addrh, &sizel, &sizeh);
			} while (asd != 0);
			if (j == 0) {
				SI_ERROR((" SP %d has no address descriptors\n",
					  i));
				goto error;
			}
		}

		/* Now get master wrappers */
		for (i = 0; i < nmw; i++) {
			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
				      &addrh, &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for MW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
				goto error;
			}
			if (i == 0)
				sii->wrapba[idx] = addrl;
		}

		/* And finally slave wrappers */
		for (i = 0; i < nsw; i++) {
			uint fwp = (nsp == 1) ? 0 : 1;
			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
				      &addrl, &addrh, &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for SW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
				goto error;
			}
			if ((nmw == 0) && (i == 0))
				sii->wrapba[idx] = addrl;
		}

		/* Don't record bridges */
		if (br)
			continue;

		/* Done with core */
		sii->numcores++;
	}

	SI_ERROR(("Reached end of erom without finding END"));

 error:
	sii->numcores = 0;
	return;
}
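/*
 * Rough usage sketch (illustrative only; the real attach flow lives outside
 * this file): the attach code is expected to map chipcommon, recognise an
 * AI (AXI) interconnect and then hand the chipcommon register block to
 * ai_scan(), roughly:
 *
 *	chipcregs_t *cc;			mapped chipcommon registers
 *	...
 *	ai_scan(&sii->pub, (void *)cc, devid);
 *	if (sii->numcores == 0)
 *		goto fail;			erom parse failed
 *
 * A failed scan leaves sii->numcores at 0 (see the error path above).
 */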

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
void *ai_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);
	u32 addr = sii->coresba[coreidx];
	u32 wrap = sii->wrapba[coreidx];
	void *regs;

	if (coreidx >= sii->numcores)
		return NULL;

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL)
	       || !(*(sii)->intrsenabled_fn) ((sii)->intr_arg));

	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		/* map new one */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		sii->curmap = regs = sii->regs[coreidx];
		if (!sii->wrappers[coreidx]) {
			sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->wrappers[coreidx]));
		}
		sii->curwrap = sii->wrappers[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
		regs = sii->curmap;
		/* point bar0 2nd 4KB window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
#endif	/* BCMSDIO */
		sii->curmap = regs = (void *)((uintptr) addr);
		sii->curwrap = (void *)((uintptr) wrap);
		break;

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	sii->curmap = regs;
	sii->curidx = coreidx;

	return regs;
}
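/*
 * Illustrative switch/restore pattern, mirroring what ai_corereg() below
 * does on its slow path (the caller is assumed to have interrupts blocked,
 * cf. the INTR_OFF()/INTR_RESTORE() pairing there):
 *
 *	uint origidx = si_coreidx(&sii->pub);
 *	void *regs = ai_setcoreidx(&sii->pub, coreidx);
 *	if (regs != NULL) {
 *		... access the new core through 'regs' ...
 *		ai_setcoreidx(&sii->pub, origidx);
 *	}
 */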

/* Return the number of address spaces in current core */
int ai_numaddrspaces(si_t *sih)
{
	return 2;
}

/* Return the address of the nth address space in the current core */
u32 ai_addrspace(si_t *sih, uint asidx)
{
	si_info_t *sii;
	uint cidx;

	sii = SI_INFO(sih);
	cidx = sii->curidx;

	if (asidx == 0)
		return sii->coresba[cidx];
	else if (asidx == 1)
		return sii->coresba2[cidx];
	else {
		SI_ERROR(("%s: Need to parse the erom again to find addr "
			  "space %d\n", __func__, asidx));
		return 0;
	}
}

/* Return the size of the nth address space in the current core */
u32 ai_addrspacesize(si_t *sih, uint asidx)
{
	si_info_t *sii;
	uint cidx;

	sii = SI_INFO(sih);
	cidx = sii->curidx;

	if (asidx == 0)
		return sii->coresba_size[cidx];
	else if (asidx == 1)
		return sii->coresba2_size[cidx];
	else {
		SI_ERROR(("%s: Need to parse the erom again to find addr "
			  "space %d\n", __func__, asidx));
		return 0;
	}
}

uint ai_flag(si_t *sih)
{
	si_info_t *sii;
	aidmp_t *ai;

	sii = SI_INFO(sih);
	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0",
			  __func__));
		return sii->curidx;
	}
	ai = sii->curwrap;

	return R_REG(sii->osh, &ai->oobselouta30) & 0x1f;
}

void ai_setint(si_t *sih, int siflag)
{
}

void ai_write_wrap_reg(si_t *sih, u32 offset, u32 val)
{
	si_info_t *sii = SI_INFO(sih);
	u32 *w = (u32 *) sii->curwrap;
	W_REG(sii->osh, w + (offset / 4), val);
	return;
}

uint ai_corevendor(si_t *sih)
{
	si_info_t *sii;
	u32 cia;

	sii = SI_INFO(sih);
	cia = sii->cia[sii->curidx];
	return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
}

uint ai_corerev(si_t *sih)
{
	si_info_t *sii;
	u32 cib;

	sii = SI_INFO(sih);
	cib = sii->cib[sii->curidx];
	return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
}

bool ai_iscoreup(si_t *sih)
{
	si_info_t *sii;
	aidmp_t *ai;

	sii = SI_INFO(sih);
	ai = sii->curwrap;

	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
		 SICF_CLOCK_EN)
		&& ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
 * operation, switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core
 * switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for
 * pci registers and (on newer pci cores) chipcommon registers.
 */
uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	u32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = false;
	si_info_t *sii;

	sii = SI_INFO(sih);

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = true;
		/* map if does not exist */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
						     SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		r = (u32 *) ((unsigned char *) sii->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/*
		 * If pci/pcie, we can get at pci/pcie regs and on newer
		 * cores to chipc.
		 */
		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = true;
			r = (u32 *) ((char *)sii->curmap +
				     PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB
			 * window or, in pcie and pci rev 13, at 8KB
			 */
			fast = true;
			if (SI_FAST(sii))
				r = (u32 *) ((char *)sii->curmap +
					     PCI_16KB0_PCIREGS_OFFSET +
					     regoff);
			else
				r = (u32 *) ((char *)sii->curmap +
					     ((regoff >= SBCONFIGOFF) ?
					      PCI_BAR0_PCISBR_OFFSET :
					      PCI_BAR0_PCIREGS_OFFSET) +
					     regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (u32 *) ((unsigned char *) ai_setcoreidx(&sii->pub, coreidx) +
			     regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	/* readback */
	w = R_REG(sii->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return w;
}
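/*
 * Usage notes, following directly from the mask-and-set logic above:
 *
 *	read only:		val = ai_corereg(sih, idx, regoff, 0, 0);
 *	read-modify-write:	ai_corereg(sih, idx, regoff, mask, bits);
 *
 * With mask == 0 and val == 0 no write is performed and the current
 * register value is simply read back; otherwise the masked bits are
 * replaced by 'val' and the resulting value is returned.
 */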

void ai_core_disable(si_t *sih, u32 bits)
{
	si_info_t *sii;
	volatile u32 dummy;
	aidmp_t *ai;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
		return;

	W_REG(sii->osh, &ai->ioctrl, bits);
	dummy = R_REG(sii->osh, &ai->ioctrl);
	OSL_DELAY(10);

	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	OSL_DELAY(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void ai_core_reset(si_t *sih, u32 bits, u32 resetbits)
{
	si_info_t *sii;
	aidmp_t *ai;
	volatile u32 dummy;

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/*
	 * Must do the disable sequence first to work for arbitrary current
	 * core state.
	 */
	ai_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */
	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	W_REG(sii->osh, &ai->resetctrl, 0);
	OSL_DELAY(1);

	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	OSL_DELAY(1);
}
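/*
 * Typical bring-up sketch (assumed caller flow, not mandated by this file):
 *
 *	ai_setcoreidx(sih, coreidx);	focus on the core
 *	ai_core_reset(sih, 0, 0);	disable, then re-enable with clock on
 *	ASSERT(ai_iscoreup(sih));	clock enabled, FGC clear, out of reset
 *
 * Core-specific ioctrl bits may be passed via 'bits'/'resetbits' as
 * described in the comment above ai_core_reset().
 */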

void ai_core_cflags_wo(si_t *sih, u32 mask, u32 val)
{
	si_info_t *sii;
	aidmp_t *ai;
	u32 w;

	sii = SI_INFO(sih);

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
			  __func__));
		return;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}
}

u32 ai_core_cflags(si_t *sih, u32 mask, u32 val)
{
	si_info_t *sii;
	aidmp_t *ai;
	u32 w;

	sii = SI_INFO(sih);
	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
			  __func__));
		return 0;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}

	return R_REG(sii->osh, &ai->ioctrl);
}

u32 ai_core_sflags(si_t *sih, u32 mask, u32 val)
{
	si_info_t *sii;
	aidmp_t *ai;
	u32 w;

	sii = SI_INFO(sih);
	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
			  __func__));
		return 0;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
		W_REG(sii->osh, &ai->iostatus, w);
	}

	return R_REG(sii->osh, &ai->iostatus);
}
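/*
 * As with ai_corereg(), calling ai_core_cflags()/ai_core_sflags() with
 * mask == 0 and val == 0 performs no write and just returns the current
 * ioctrl/iostatus value, e.g.:
 *
 *	u32 ioctrl = ai_core_cflags(sih, 0, 0);
 *	u32 iostatus = ai_core_sflags(sih, 0, 0);
 */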