2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
5 * Copyright (C) 1999-2011, Broadcom Corporation
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
25 * $Id: sbutils.c,v 1.687.2.1 2010-11-29 20:21:56 Exp $
39 #include "siutils_priv.h"
42 /* local prototypes */
43 static uint _sb_coreidx(si_info_t *sii, uint32 sba);
/* NOTE(review): the _sb_scan prototype below appears truncated in this
 * excerpt -- the parameter list continues on a line that is not visible
 * (the definition further down takes a trailing 'uint numcores'); confirm
 * against the complete source file.
 */
44 static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
46 static uint32 _sb_coresba(si_info_t *sii);
47 static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
/* Read-modify-write helper for a sbconfig register: clear the bits in
 * 'mask', then OR in 'val'.
 */
49 #define	SET_SBREG(sii, r, mask, val)	\
50 		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
/* Map a core's register-window virtual address to its sbconfig block,
 * which lives SBCONFIGOFF bytes into the core's register space.
 */
51 #define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
/* Sonics backplane revision codes as encoded in the sbidlow register. */
54 #define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
55 #define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
/* All sbconfig register reads/writes funnel through sb_read_sbreg /
 * sb_write_sbreg so that bus-specific quirks (e.g. PCMCIA narrow access)
 * are handled in exactly one place.
 */
57 #define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
58 #define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
59 #define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
60 #define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
/* Read a 32-bit sbconfig register, applying the PCMCIA MEM_SEG
 * workaround when needed (see comment in the body).
 * NOTE(review): several lines of this function (return type, the bustype
 * test guarding the PCMCIA path, the 'tmp' setup and the final return of
 * 'val') are not visible in this excerpt.
 */
63 sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
66 	uint32 val, intr_val = 0;
70 	 * compact flash only has 11 bits of address, while we need a 12 bit address.
71 	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
72 	 * so we program MEM_SEG with the 12th bit when necessary (access sb registers).
73 	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special
	/* block interrupts across the attribute-space write + register read */
76 	INTR_OFF(sii, intr_val);
78 		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
79 		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
82 	val = R_REG(sii->osh, sbr);
	/* presumably restores MEM_SEG to its previous value -- the setup of
	 * 'tmp' is not visible here; confirm against the full source.
	 */
86 		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
87 	INTR_RESTORE(sii, intr_val);
/* Write a 32-bit sbconfig register. On PCMCIA the write is performed as
 * two 16-bit halves (low half first), each preceded by a dummy read;
 * other buses take the plain 32-bit W_REG path.
 * NOTE(review): the return type, the bustype test guarding the MEM_SEG
 * path, and the 'tmp' setup are not visible in this excerpt.
 */
94 sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
97 	volatile uint32 dummy;
102 	 * compact flash only has 11 bits of address, while we need a 12 bit address.
103 	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
104 	 * so we program MEM_SEG with the 12th bit when necessary (access sb registers).
105 	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special
108 	INTR_OFF(sii, intr_val);
110 		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
111 		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
114 	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
		/* 16-bit bus: split the 32-bit value into two half-word writes,
		 * with a dummy read before each to flush/settle the access.
		 */
115 		dummy = R_REG(sii->osh, sbr);
116 		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
117 		dummy = R_REG(sii->osh, sbr);
118 		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
120 		W_REG(sii->osh, sbr, v);
124 		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
125 	INTR_RESTORE(sii, intr_val);
/* Body of a current-core query (the function header is not visible in
 * this excerpt): extracts the core-code field from the current core's
 * sbidhigh register.
 */
136 	sb = REGS2SB(sii->curmap);
138 	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
/* Return the interrupt-flag status (sbflagst) read from the chipcommon
 * core. Interrupts are blocked while the core focus is temporarily
 * switched to chipcommon and then restored to the caller's core.
 * NOTE(review): the return type, local declarations and final return of
 * 'intflag' are not visible in this excerpt.
 */
142 sb_intflag(si_t *sih)
147 	uint origidx, intflag, intr_val = 0;
151 	INTR_OFF(sii, intr_val);
152 	origidx = si_coreidx(sih);
153 	corereg = si_setcore(sih, CC_CORE_ID, 0);
154 	ASSERT(corereg != NULL);
155 	sb = REGS2SB(corereg);
156 	intflag = R_SBREG(sii, &sb->sbflagst);
	/* put focus back on the core the caller had selected */
157 	sb_setcoreidx(sih, origidx);
158 	INTR_RESTORE(sii, intr_val);
/* Body of a current-core query (function header not visible here):
 * returns the NUM0 field of the current core's target-port slave flag
 * register (sbtpsflag).
 */
170 	sb = REGS2SB(sii->curmap);
172 	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
/* Program the current core's interrupt vector register (sbintvec).
 * NOTE(review): the computation of 'vec' from 'siflag' is on lines not
 * visible in this excerpt -- presumably 0 when siflag is -1, else
 * (1 << siflag); confirm against the full source.
 */
176 sb_setint(si_t *sih, int siflag)
183 	sb = REGS2SB(sii->curmap);
189 	W_SBREG(sii, &sb->sbintvec, vec);
192 /* return core index of the core with address 'sba' */
/* Linear search of the discovered-core address table (sii->coresba[]).
 * NOTE(review): the 'return i;' on a match and the BADIDX fallthrough
 * return are on lines not visible in this excerpt.
 */
194 _sb_coreidx(si_info_t *sii, uint32 sba)
198 	for (i = 0; i < sii->numcores; i ++)
199 		if (sba == sii->coresba[i])
204 /* return core address of the current core */
/* How the current core's backplane address is recovered depends on the
 * host bus:
 *  - SI bus: read it from the core's own sbadmatch0 register.
 *  - PCI: it is whatever BAR0 window currently points at.
 *  - PCMCIA: reassemble it from the three attribute-space address bytes.
 *  - (fallthrough visible below): use the mapped virtual address itself.
 * NOTE(review): the case labels, breaks and final return of 'sbaddr' are
 * on lines not visible in this excerpt.
 */
206 _sb_coresba(si_info_t *sii)
211 	switch (BUSTYPE(sii->pub.bustype)) {
213 		sbconfig_t *sb = REGS2SB(sii->curmap);
214 		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
219 		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
		/* PCMCIA: address is split across three attribute registers,
		 * bits [15:12], [23:16] and [31:24] respectively.
		 */
224 		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
225 		sbaddr  = (uint32)tmp << 12;
226 		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
227 		sbaddr |= (uint32)tmp << 16;
228 		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
229 		sbaddr |= (uint32)tmp << 24;
235 		sbaddr = (uint32)(uintptr)sii->curmap;
	/* unknown bus type: report an invalid core address */
240 		sbaddr = BADCOREADDR;
/* Return the vendor-code field of the current core's sbidhigh register. */
248 sb_corevendor(si_t *sih)
254 	sb = REGS2SB(sii->curmap);
256 	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
/* Return the revision of the current core, decoded from sbidhigh via the
 * SBCOREREV() macro.
 */
260 sb_corerev(si_t *sih)
267 	sb = REGS2SB(sii->curmap);
268 	sbidh = R_SBREG(sii, &sb->sbidhigh);
270 	return (SBCOREREV(sbidh));
273 /* set core-specific control flags */
/* Write-only variant: updates the SICF field of sbtmstatelow (clear
 * 'mask' bits, set 'val' bits) without returning the resulting value.
 * Caller guarantees val is a subset of mask (asserted below).
 */
275 sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
282 	sb = REGS2SB(sii->curmap);
284 	ASSERT((val & ~mask) == 0);
	/* mask/val address the core-flag bits, which sit at SBTML_SICF_SHIFT
	 * within sbtmstatelow
	 */
287 	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
288 	        (val << SBTML_SICF_SHIFT);
289 	W_SBREG(sii, &sb->sbtmstatelow, w);
292 /* set/clear core-specific control flags */
/* Same update as sb_core_cflags_wo() but reads the register back and
 * returns the new SICF field; the readback also ensures write completion.
 */
294 sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
301 	sb = REGS2SB(sii->curmap);
303 	ASSERT((val & ~mask) == 0);
307 	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
308 	        (val << SBTML_SICF_SHIFT);
309 	W_SBREG(sii, &sb->sbtmstatelow, w);
312 	/* return the new value
313 	 * for a write operation, the following readback ensures completion of the write operation.
315 	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
318 /* set/clear core-specific status flags */
/* Update the SISF (status flag) field of sbtmstatehigh and return the new
 * field value. Only bits within SISF_CORE_BITS may be touched (asserted).
 */
320 sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
327 	sb = REGS2SB(sii->curmap);
329 	ASSERT((val & ~mask) == 0);
330 	ASSERT((mask & ~SISF_CORE_BITS) == 0);
334 	w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
335 	        (val << SBTMH_SISF_SHIFT);
336 	W_SBREG(sii, &sb->sbtmstatehigh, w);
339 	/* return the new value */
340 	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
/* Return TRUE iff the current core is up: not in reset, not rejecting
 * transactions, and with its clock enabled -- all judged from sbtmstatelow.
 */
344 sb_iscoreup(si_t *sih)
350 	sb = REGS2SB(sii->curmap);
352 	return ((R_SBREG(sii, &sb->sbtmstatelow) &
353 	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
354 	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
358  * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
359  * switch back to the original core, and return the new value.
361  * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
363  * Also, when using pci/pcie, we can optimize away the core switching for pci registers
364  * and (on newer pci cores) chipcommon registers.
/* Perform w = (reg & ~mask) | val on register 'regoff' of core 'coreidx',
 * returning the value read back afterwards.
 * NOTE(review): numerous lines (the 'fast' flag logic, some else branches,
 * closing braces and the final 'return w') are not visible in this excerpt.
 */
367 sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
378 	ASSERT(GOODIDX(coreidx));
379 	ASSERT(regoff < SI_CORE_SIZE);
380 	ASSERT((val & ~mask) == 0);
382 	if (coreidx >= SI_MAXCORES)
385 	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
386 		/* If internal bus, we can always get at everything */
388 		/* map if does not exist */
389 		if (!sii->regs[coreidx]) {
390 			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
392 			ASSERT(GOODREGS(sii->regs[coreidx]));
394 		r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
395 	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
396 		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
398 		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
399 			/* Chipc registers are mapped at 12KB */
402 			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
403 		} else if (sii->pub.buscoreidx == coreidx) {
404 			/* pci registers are at either in the last 2KB of an 8KB window
405 			 * or, in pcie and pci rev 13 at 8KB
409 				r = (uint32 *)((char *)sii->curmap +
410 				               PCI_16KB0_PCIREGS_OFFSET + regoff);
412 				r = (uint32 *)((char *)sii->curmap +
413 				               ((regoff >= SBCONFIGOFF) ?
414 				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
	/* slow path: no direct window, so switch focus to the target core
	 * with interrupts blocked
	 */
420 		INTR_OFF(sii, intr_val);
422 		/* save current core index */
423 		origidx = si_coreidx(&sii->pub);
426 		r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
	/* sbconfig-space offsets must go through the sbreg helpers; plain
	 * core registers use direct R_REG/W_REG
	 */
432 	if (regoff >= SBCONFIGOFF) {
433 		w = (R_SBREG(sii, r) & ~mask) | val;
436 		w = (R_REG(sii->osh, r) & ~mask) | val;
437 		W_REG(sii->osh, r, w);
442 	if (regoff >= SBCONFIGOFF)
	/* BCM5354 quirk: reading back the chipcommon watchdog register is not
	 * allowed, so skip the readback for that specific register
	 */
445 	if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
446 	    (coreidx == SI_CC_IDX) &&
447 	    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
450 		w = R_REG(sii->osh, r);
454 		/* restore core index */
455 		if (origidx != coreidx)
456 			sb_setcoreidx(&sii->pub, origidx);
458 		INTR_RESTORE(sii, intr_val);
464 /* Scan the enumeration space to find all cores starting from the given
465  * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
466  * is the default core address at chip POR time and 'regs' is the virtual
467  * address that the default core is mapped at. 'ncores' is the number of
468  * cores expected on bus 'sbba'. It returns the total number of cores
469  * starting from bus 'sbba', inclusive.
/* Recursion depth limit: a primary bus plus at most one bridged bus. */
471 #define SB_MAXBUSES	2
/* NOTE(review): several lines of this function (returns on error paths,
 * some closing braces, the CID_CC shift, and the setup of 'ncc') are not
 * visible in this excerpt.
 */
473 _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
479 	if (bus >= SB_MAXBUSES) {
480 		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
483 	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
485 	/* Scan all cores on the bus starting from core 0.
486 	 * Core addresses must be contiguous on each bus.
488 	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
489 		sii->coresba[next] = sbba + (i * SI_CORE_SIZE);
491 		/* keep and reuse the initial register mapping */
492 		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (sii->coresba[next] == sba)) {
493 			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
494 			sii->regs[next] = regs;
497 		/* change core to 'next' and read its coreid */
498 		sii->curmap = _sb_setcoreidx(sii, next);
501 		sii->coreid[next] = sb_coreid(&sii->pub);
503 		/* core specific processing... */
504 		/* chipc provides # cores */
505 		if (sii->coreid[next] == CC_CORE_ID) {
506 			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
507 			uint32 ccrev = sb_corerev(&sii->pub);
509 			/* determine numcores - this is the total # cores in the chip */
510 			if (((ccrev == 4) || (ccrev >= 6)))
511 				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
			/* older chipcommon revs don't report the core count;
			 * fall back to a per-chip hardcoded table
			 */
515 				uint chip = CHIPID(sii->pub.chip);
517 				if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
519 				else if (chip == BCM4704_CHIP_ID)
521 				else if (chip == BCM5365_CHIP_ID)
524 					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
530 			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
531 				sii->pub.issim ? "QT" : ""));
533 		/* scan bridged SB(s) and add results to the end of the list */
534 		else if (sii->coreid[next] == OCP_CORE_ID) {
535 			sbconfig_t *sb = REGS2SB(sii->curmap);
536 			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
539 			sii->numcores = next + 1;
			/* only follow bridges whose window lies in the standard
			 * enumeration space, and which we haven't seen already
			 */
541 			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
544 			if (_sb_coreidx(sii, nsbba) != BADIDX)
			/* bridged-bus core count lives in bits 19:16 of the
			 * bridge's sbtmstatehigh
			 */
547 			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
548 			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
549 			if (sbba == SI_ENUM_BASE)
555 	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
557 	sii->numcores = i + ncc;
558 	return sii->numcores;
561 /* scan the sb enumerated space to identify all cores */
/* Entry point for core discovery: records the backplane revision from the
 * current core's sbidlow, remembers the POR default core address, then
 * kicks off the recursive _sb_scan() from SI_ENUM_BASE.
 */
563 sb_scan(si_t *sih, void *regs, uint devid)
570 	sb = REGS2SB(sii->curmap);
572 	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
574 	/* Save the current core info and validate it later till we know
575 	 * for sure what is good and what is bad.
577 	origsba = _sb_coresba(sii);
579 	/* scan all SB(s) starting from SI_ENUM_BASE */
580 	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
584  * This function changes logical "focus" to the indicated core;
585  * must be called with interrupts off.
586  * Moreover, callers should keep interrupts off during switching out of and back to d11 core
/* Public core-switch wrapper: validates the index, asserts interrupts are
 * off, delegates to _sb_setcoreidx(), and caches the new mapping/index.
 * Returns the new core's register virtual address.
 */
589 sb_setcoreidx(si_t *sih, uint coreidx)
595 	if (coreidx >= sii->numcores)
599 	 * If the user has provided an interrupt mask enabled function,
600 	 * then assert interrupts are disabled before switching the core.
602 	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
604 	sii->curmap = _sb_setcoreidx(sii, coreidx);
605 	sii->curidx = coreidx;
607 	return (sii->curmap);
610 /* This function changes the logical "focus" to the indicated core.
611  * Return the current core's virtual address.
/* Bus-specific core switch:
 *  - SI bus: lazily REG_MAP the core's backplane address.
 *  - PCI: repoint the BAR0 window at the core.
 *  - PCMCIA: program the three attribute-space address bytes.
 *  - (last visible case): direct-mapped, use the backplane address as the
 *    virtual address.
 * NOTE(review): the case labels, 'regs' assignments on the PCI/PCMCIA
 * paths, breaks, and the final return are not visible in this excerpt.
 */
614 _sb_setcoreidx(si_info_t *sii, uint coreidx)
616 	uint32 sbaddr = sii->coresba[coreidx];
619 	switch (BUSTYPE(sii->pub.bustype)) {
622 		if (!sii->regs[coreidx]) {
623 			sii->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
624 			ASSERT(GOODREGS(sii->regs[coreidx]));
626 		regs = sii->regs[coreidx];
630 		/* point bar0 window */
631 		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
		/* PCMCIA: write address bits [15:12], [23:16], [31:24] into the
		 * three attribute registers (mirror of the _sb_coresba() read)
		 */
636 		uint8 tmp = (sbaddr >> 12) & 0x0f;
637 		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
638 		tmp = (sbaddr >> 16) & 0xff;
639 		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
640 		tmp = (sbaddr >> 24) & 0xff;
641 		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
648 		if (!sii->regs[coreidx]) {
649 			sii->regs[coreidx] = (void *)(uintptr)sbaddr;
650 			ASSERT(GOODREGS(sii->regs[coreidx]));
652 		regs = sii->regs[coreidx];
665 /* Return the address of sbadmatch0/1/2/3 register */
/* Maps an address-space index (0..3) to the corresponding sbadmatch
 * register of the current core; logs and (presumably) returns NULL for an
 * out-of-range index.
 * NOTE(review): the switch statement, case labels, breaks and the final
 * 'return addrm' are not visible in this excerpt.
 */
666 static volatile uint32 *
667 sb_admatch(si_info_t *sii, uint asidx)
670 	volatile uint32 *addrm;
672 	sb = REGS2SB(sii->curmap);
676 		addrm =  &sb->sbadmatch0;
680 		addrm =  &sb->sbadmatch1;
684 		addrm =  &sb->sbadmatch2;
688 		addrm =  &sb->sbadmatch3;
692 		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
699 /* Return the number of address spaces in current core */
701 sb_numaddrspaces(si_t *sih)
707 	sb = REGS2SB(sii->curmap);
709 	/* + 1 because of enumeration space */
710 	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
713 /* Return the address of the nth address space in the current core */
/* Decodes the base address out of the selected sbadmatch register. */
715 sb_addrspace(si_t *sih, uint asidx)
721 	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
724 /* Return the size of the nth address space in the current core */
/* Decodes the window size out of the selected sbadmatch register. */
726 sb_addrspacesize(si_t *sih, uint asidx)
732 	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
736 /* do buffered registers update */
/* Body of the buffered-register commit routine (the function header is
 * not visible in this excerpt; presumably sb_commit). With interrupts
 * blocked, it switches to chipcommon (when present) and writes the
 * broadcast address/data pair to flush buffered register writes, then
 * restores the original core.
 */
746 	origidx = sii->curidx;
747 	ASSERT(GOODIDX(origidx));
749 	INTR_OFF(sii, intr_val);
751 	/* switch over to chipcommon core if there is one, else use pci */
752 	if (sii->pub.ccrev != NOREV) {
753 		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
754 		ASSERT(ccregs != NULL);
756 		/* do the buffer registers update */
757 		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
758 		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
762 	/* restore core index */
763 	sb_setcoreidx(sih, origidx);
764 	INTR_RESTORE(sii, intr_val);
/* Put the current core into reset, leaving reset and reject asserted.
 * 'bits' are core-specific SICF flags to keep set during/after the
 * sequence. The ordered reject/spin/reset dance follows the Sonics
 * backplane disable protocol.
 * NOTE(review): early 'return' statements after the two quick-exit checks
 * and several delay/brace lines are not visible in this excerpt.
 */
768 sb_core_disable(si_t *sih, uint32 bits)
771 	volatile uint32 dummy;
776 	ASSERT(GOODREGS(sii->curmap));
777 	sb = REGS2SB(sii->curmap);
779 	/* if core is already in reset, just return */
780 	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
783 	/* if clocks are not enabled, put into reset and return */
784 	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
787 	/* set target reject and spin until busy is clear (preserve core-specific bits) */
788 	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	/* readback flushes the posted write before spinning */
789 	dummy = R_SBREG(sii, &sb->sbtmstatelow);
791 	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
792 	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
793 		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
	/* initiator cores also need their initiator-reject asserted and the
	 * initiator-busy flag drained
	 */
795 	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
796 		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
797 		dummy = R_SBREG(sii, &sb->sbimstate);
799 		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
802 	/* set reset and reject while enabling the clocks */
803 	W_SBREG(sii, &sb->sbtmstatelow,
804 	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
805 	         SBTML_REJ | SBTML_RESET));
806 	dummy = R_SBREG(sii, &sb->sbtmstatelow);
809 	/* don't forget to clear the initiator reject bit */
810 	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
811 		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
814 	/* leave reset and reject asserted */
815 	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
819 /* reset and re-enable a core
821  * bits - core specific bits that are set during and after reset sequence
822  * resetbits - core specific bits that are set only during reset sequence
/* Full reset-and-bring-up of the current core: disable first (works from
 * any core state), then release reset with clocks forced on, clear any
 * latched error state, and finally leave only the clock enabled.
 * NOTE(review): delay calls between the write/readback pairs are not
 * visible in this excerpt.
 */
825 sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
829 	volatile uint32 dummy;
832 	ASSERT(GOODREGS(sii->curmap));
833 	sb = REGS2SB(sii->curmap);
836 	 * Must do the disable sequence first to work for arbitrary current core state.
838 	sb_core_disable(sih, (bits | resetbits));
841 	 * Now do the initialization sequence.
844 	/* set reset while enabling the clock and forcing them on throughout the core */
845 	W_SBREG(sii, &sb->sbtmstatelow,
846 	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	/* readbacks after each write flush the posted write */
848 	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	/* clear any serror / inband-error / timeout state latched while the
	 * core was down
	 */
851 	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
852 		W_SBREG(sii, &sb->sbtmstatehigh, 0);
854 	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
855 		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
858 	/* clear reset and allow it to propagate throughout the core */
859 	W_SBREG(sii, &sb->sbtmstatelow,
860 	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
861 	dummy = R_SBREG(sii, &sb->sbtmstatelow);
864 	/* leave clock enabled */
865 	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
866 	dummy = R_SBREG(sii, &sb->sbtmstatelow);
871  * Set the initiator timeout for the "master core".
872  * The master core is defined to be the core in control
873  * of the chip and so it issues accesses to non-memory
874  * locations (Because of dma *any* core can access memory).
876  * The routine uses the bus to decide who is the master:
879  *	PCI_BUS => pci or pcie
880  *	PCMCIA_BUS => pcmcia
883  * This routine exists so callers can disable initiator
884  * timeouts so accesses to very slow devices like otp
885  * won't cause an abort. The routine allows arbitrary
886  * settings of the service and request timeouts, though.
888  * Returns the timeout state before changing it or -1
/* Combined request/service timeout field within sbimconfiglow. */
892 #define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
/* NOTE(review): the case labels of the bus switch, the capture of the
 * previous timeout into 'ret', and the final return are not visible in
 * this excerpt.
 */
895 sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
900 	uint32 tmp, ret = 0xffffffff;
	/* reject timeout values outside the RTO/STO fields */
905 	if ((to & ~TO_MASK) != 0)
908 	/* Figure out the master core */
910 		switch (BUSTYPE(sii->pub.bustype)) {
912 			idx = sii->pub.buscoreidx;
919 			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
922 			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
	/* switch to the master core with interrupts blocked, update the
	 * timeout field, then restore the original core
	 */
931 	INTR_OFF(sii, intr_val);
932 	origidx = si_coreidx(sih);
934 	sb = REGS2SB(sb_setcoreidx(sih, idx));
936 	tmp = R_SBREG(sii, &sb->sbimconfiglow);
938 	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
941 	sb_setcoreidx(sih, origidx);
942 	INTR_RESTORE(sii, intr_val);
/* Decode the base address encoded in an sbadmatch register value.
 * The low bits of 'admatch' select one of several encodings ("types"),
 * each with a different base-address mask; negative (ADNEG) matching is
 * not supported for types 1 and 2 (asserted).
 * NOTE(review): the type extraction shift, the type==0 branch header, and
 * the final 'return base' are not visible in this excerpt.
 */
947 sb_base(uint32 admatch)
952 	type = admatch & SBAM_TYPE_MASK;
958 		base = admatch & SBAM_BASE0_MASK;
959 	} else if (type == 1) {
960 		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
961 		base = admatch & SBAM_BASE1_MASK;
962 	} else if (type == 2) {
963 		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
964 		base = admatch & SBAM_BASE2_MASK;
971 sb_size(uint32 admatch)
976 type = admatch & SBAM_TYPE_MASK;
982 size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
983 } else if (type == 1) {
984 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
985 size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
986 } else if (type == 2) {
987 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
988 size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);