Merge branch 'feature/move_xtensa_to_esp8266' into 'refactor'

Move xtensa to components

See merge request sdk/ESP8266_RTOS_SDK!30
Wu Jian Gang
2018-04-08 10:59:37 +08:00
34 changed files with 0 additions and 0 deletions


@@ -0,0 +1,807 @@
/*
* xtensa/cacheasm.h -- assembler-specific cache related definitions
* that depend on CORE configuration
*
* This file is logically part of xtensa/coreasm.h ,
* but is kept separate for modularity / compilation-performance.
*/
/*
* Copyright (c) 2001-2002, 2006 Tensilica Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef XTENSA_CACHEASM_H
#define XTENSA_CACHEASM_H
#include <xtensa/coreasm.h>
#include <xtensa/xtensa-xer.h>
/*
* This header file defines assembler macros of the form:
* <x>cache_<func>
* where <x> is 'i' or 'd' for instruction and data caches,
* and <func> indicates the function of the macro.
*
* The following functions <func> are defined,
* and apply only to the specified cache (I or D):
*
* reset
* Resets the cache.
*
* sync
* Makes sure any previous cache instructions have been completed;
* ie. makes sure any previous cache control operations
* have had full effect and been synchronized to memory.
* Eg. any invalidate completed [so as not to generate a hit],
* any writebacks or other pipelined writes written to memory, etc.
*
* invalidate_line (single cache line)
* invalidate_region (specified memory range)
* invalidate_all (entire cache)
* Invalidates all cache entries that cache
* data from the specified memory range.
* NOTE: locked entries are not invalidated.
*
* writeback_line (single cache line)
* writeback_region (specified memory range)
* writeback_all (entire cache)
* Writes back to memory all dirty cache entries
* that cache data from the specified memory range,
* and marks these entries as clean.
* NOTE: on some future implementations, this might
* also invalidate.
* NOTE: locked entries are written back, but never invalidated.
* NOTE: instruction caches never implement writeback.
*
* writeback_inv_line (single cache line)
* writeback_inv_region (specified memory range)
* writeback_inv_all (entire cache)
* Writes back to memory all dirty cache entries
* that cache data from the specified memory range,
* and invalidates these entries (including all clean
* cache entries that cache data from that range).
* NOTE: locked entries are written back but not invalidated.
* NOTE: instruction caches never implement writeback.
*
* lock_line (single cache line)
* lock_region (specified memory range)
* Prefetch and lock the specified memory range into cache.
* NOTE: if any part of the specified memory range cannot
* be locked, a Load/Store Error (for dcache) or Instruction
* Fetch Error (for icache) exception occurs. These macros don't
* do anything special (yet anyway) to handle this situation.
*
* unlock_line (single cache line)
* unlock_region (specified memory range)
* unlock_all (entire cache)
* Unlock cache entries that cache the specified memory range.
* Entries not already locked are unaffected.
*
* coherence_on
* coherence_off
* Turn cache coherence on and off, respectively.
*
*/
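/*
 * Usage sketch (illustrative only, not part of the original Tensilica header):
 * a typical reset-time invocation of these macros, using a2/a3 as the
 * required scratch registers (the register choice is an assumption):
 *
 *	icache_reset	a2, a3		// unlock + invalidate entire I-cache
 *	dcache_reset	a2, a3		// unlock + invalidate entire D-cache
 */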
/*************************** GENERIC -- ALL CACHES ***************************/
/*
* The following macros assume the following cache size/parameter limits
* in the current Xtensa core implementation:
* cache size: 1024 bytes minimum
* line size: 16 - 64 bytes
* way count: 1 - 4
*
* Minimum entries per way (ie. per associativity) = 1024 / 64 / 4 = 4
* Hence the assumption that each loop can execute four cache instructions.
*
* Correspondingly, the offset range of instructions is assumed able to cover
* four lines, ie. offsets {0,1,2,3} * line_size are assumed valid for
* both hit and indexed cache instructions. Ie. these offsets are all
* valid: 0, 16, 32, 48, 64, 96, 128, 192 (for line sizes 16, 32, 64).
* This is true of all original cache instructions
* (dhi, ihi, dhwb, dhwbi, dii, iii) which have offsets
* of 0 to 1020 in multiples of 4 (ie. 8 bits shifted by 2).
* This is also true of subsequent cache instructions
* (dhu, ihu, diu, iiu, diwb, diwbi, dpfl, ipfl) which have offsets
* of 0 to 240 in multiples of 16 (ie. 4 bits shifted by 4).
*
* (Maximum cache size, currently 32k, doesn't affect the following macros.
* Cache ways > MMU min page size cause aliasing but that's another matter.)
*/
/*
* Macro to apply an 'indexed' cache instruction to the entire cache.
*
* Parameters:
* cainst instruction/macro that takes an address register parameter
* and an offset parameter (in range 0 .. 3*linesize).
* size size of cache in bytes
* linesize size of cache line in bytes (always power-of-2)
* assoc_or1 number of associativities (ways/sets) in cache
* if all sets affected by cainst,
* or 1 if only one set (or not all sets) of the cache
* is affected by cainst (eg. DIWB or DIWBI [not yet ISA defined]).
* aa, ab unique address registers (temporaries)
* loopokay 1 (default) allows use of zero-overhead loops, 0 does not
* maxofs maximum value of cainst's immediate offset parameter, in bytes (default 240)
* (NOTE: macro assumes maxofs allows a power-of-2 number of lines)
*/
.macro cache_index_all cainst, size, linesize, assoc_or1, aa, ab, loopokay=1, maxofs=240
// Number of indices in cache (lines per way):
.set .Lindices, (\size / (\linesize * \assoc_or1))
// Number of indices processed per loop iteration (max 4):
.set .Lperloop, .Lindices
.ifgt .Lperloop - 4
.set .Lperloop, 4
.endif
// Also limit instructions per loop if cache line size exceeds immediate range:
.set .Lmaxperloop, (\maxofs / \linesize) + 1
.ifgt .Lperloop - .Lmaxperloop
.set .Lperloop, .Lmaxperloop
.endif
// Avoid addi of 128 which takes two instructions (addmi,addi):
.ifeq .Lperloop*\linesize - 128
.ifgt .Lperloop - 1
.set .Lperloop, .Lperloop / 2
.endif
.endif
// \size byte cache, \linesize byte lines, \assoc_or1 way(s) affected by each \cainst.
.ifne (\loopokay & XCHAL_HAVE_LOOPS)
movi \aa, .Lindices / .Lperloop // number of loop iterations
// Possible improvement: need only loop if \aa > 1 ;
// however \aa == 1 is highly unlikely.
movi \ab, 0 // to iterate over cache
loop \aa, .Lend_cachex\@
.set .Li, 0 ; .rept .Lperloop
\cainst \ab, .Li*\linesize
.set .Li, .Li+1 ; .endr
addi \ab, \ab, .Lperloop*\linesize // move to next line
.Lend_cachex\@:
.else
movi \aa, (\size / \assoc_or1)
// Possible improvement: need only loop if \aa > 1 ;
// however \aa == 1 is highly unlikely.
movi \ab, 0 // to iterate over cache
.Lstart_cachex\@:
.set .Li, 0 ; .rept .Lperloop
\cainst \ab, .Li*\linesize
.set .Li, .Li+1 ; .endr
addi \ab, \ab, .Lperloop*\linesize // move to next line
bltu \ab, \aa, .Lstart_cachex\@
.endif
.endm
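/*
 * Example (illustrative only; mirrors the invocations further below in this
 * header): invalidate every line of the I-cache by indexing with 'iii',
 * using a2/a3 as the temporaries (the register choice is an assumption):
 *
 *	cache_index_all	iii, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, XCHAL_ICACHE_WAYS, a2, a3, 1, 1020
 */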
/*
* Macro to apply a 'hit' cache instruction to a memory region,
* ie. to any cache entries that cache a specified portion (region) of memory.
* Takes care of the unaligned cases, ie. may apply to one
* more cache line than \asize / linesize if \addr is not aligned.
*
*
* Parameters are:
* cainst instruction/macro that takes an address register parameter
* and an offset parameter (currently always zero)
* and generates a cache instruction (eg. "dhi", "dhwb", "ihi", etc.)
* linesize_log2 log2(size of cache line in bytes)
* addr register containing start address of region (clobbered)
* asize register containing size of the region in bytes (clobbered)
* askew unique register used as temporary
*
* Note: A possible optimization to this macro is to apply the operation
* to the entire cache if the region exceeds the size of the cache
* by some empirically determined amount or factor. Some experimentation
* is required to determine the appropriate factors, which also need
* to be tunable if required.
*/
.macro cache_hit_region cainst, linesize_log2, addr, asize, askew
// Make \asize the number of iterations:
extui \askew, \addr, 0, \linesize_log2 // get unalignment amount of \addr
add \asize, \asize, \askew // ... and add it to \asize
addi \asize, \asize, (1 << \linesize_log2) - 1 // round up!
srli \asize, \asize, \linesize_log2
// Iterate over region:
floopnez \asize, cacheh\@
\cainst \addr, 0
addi \addr, \addr, (1 << \linesize_log2) // move to next line
floopend \asize, cacheh\@
.endm
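/*
 * Example (illustrative only; this is how the region macros below use it):
 * write back every D-cache line caching the region [a2, a2+a3), with a4 as
 * the temporary (register choice is an assumption; a2/a3 get clobbered):
 *
 *	cache_hit_region	dhwb, XCHAL_DCACHE_LINEWIDTH, a2, a3, a4
 */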
/*************************** INSTRUCTION CACHE ***************************/
/*
* Reset/initialize the instruction cache by simply invalidating it:
* (need to unlock first also, if cache locking implemented):
*
* Parameters:
* aa, ab unique address registers (temporaries)
*/
.macro icache_reset aa, ab, loopokay=0
icache_unlock_all \aa, \ab, \loopokay
icache_invalidate_all \aa, \ab, \loopokay
.endm
/*
* Synchronize after an instruction cache operation,
* to be sure everything is in sync with memory as to be
* expected following any previous instruction cache control operations.
*
* Even if a config doesn't have caches, an isync is still needed
* when instructions in any memory are modified, whether by a loader
* or self-modifying code. Therefore, this macro always produces
* an isync, whether or not an icache is present.
*
* Parameters are:
* ar an address register (temporary) (currently unused, but may be used in future)
*/
.macro icache_sync ar
isync
.endm
/*
* Invalidate a single line of the instruction cache.
* Parameters are:
* ar address register that contains (virtual) address to invalidate
* (may get clobbered in a future implementation, but not currently)
* offset (optional) offset to add to \ar to compute effective address to invalidate
* (note: some number of lsbits are ignored)
*/
.macro icache_invalidate_line ar, offset
#if XCHAL_ICACHE_SIZE > 0
ihi \ar, \offset // invalidate icache line
/*
* NOTE: in some early version of a test chip silicon (SiChip1),
* 'ihi' didn't work, so software had to replace it with
* the much more draconian 'iii'
* (which would just invalidate more than it should,
* which should be okay other than the performance hit
* because cache locking did not exist in that version,
* unless user somehow relies on something being cached).
*
* #if ... targeting this ancient, now non-existent, test chip silicon ...
* iii \ar, \offset
* #endif
*/
icache_sync \ar
#endif
.endm
/*
* Invalidate instruction cache entries that cache a specified portion of memory.
* Parameters are:
* astart start address (register gets clobbered)
* asize size of the region in bytes (register gets clobbered)
* ac unique register used as temporary
*/
.macro icache_invalidate_region astart, asize, ac
#if XCHAL_ICACHE_SIZE > 0
// Instruction cache region invalidation:
cache_hit_region ihi, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
icache_sync \ac
// End of instruction cache region invalidation
#endif
.endm
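/*
 * Usage sketch (illustrative only, not part of the original header): after a
 * loader or self-modifying code writes instructions to the region starting
 * at a2 with size a3 (register choice is an assumption), drop any stale
 * I-cache copies before executing it; note that a2/a3 get clobbered:
 *
 *	icache_invalidate_region	a2, a3, a4
 */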
/*
* Invalidate entire instruction cache.
*
* Parameters:
* aa, ab unique address registers (temporaries)
*/
.macro icache_invalidate_all aa, ab, loopokay=1
#if XCHAL_ICACHE_SIZE > 0
// Instruction cache invalidation:
cache_index_all iii, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, XCHAL_ICACHE_WAYS, \aa, \ab, \loopokay, 1020
icache_sync \aa
// End of instruction cache invalidation
#endif
.endm
/*
* Lock (prefetch & lock) a single line of the instruction cache.
*
* Parameters are:
* ar address register that contains (virtual) address to lock
* (may get clobbered in a future implementation, but not currently)
* offset offset to add to \ar to compute effective address to lock
* (note: some number of lsbits are ignored)
*/
.macro icache_lock_line ar, offset
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
ipfl \ar, \offset /* prefetch and lock icache line */
icache_sync \ar
#endif
.endm
/*
* Lock (prefetch & lock) a specified portion of memory into the instruction cache.
* Parameters are:
* astart start address (register gets clobbered)
* asize size of the region in bytes (register gets clobbered)
* ac unique register used as temporary
*/
.macro icache_lock_region astart, asize, ac
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
// Instruction cache region lock:
cache_hit_region ipfl, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
icache_sync \ac
// End of instruction cache region lock
#endif
.endm
/*
* Unlock a single line of the instruction cache.
*
* Parameters are:
* ar address register that contains (virtual) address to unlock
* (may get clobbered in a future implementation, but not currently)
* offset offset to add to \ar to compute effective address to unlock
* (note: some number of lsbits are ignored)
*/
.macro icache_unlock_line ar, offset
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
ihu \ar, \offset /* unlock icache line */
icache_sync \ar
#endif
.endm
/*
* Unlock a specified portion of memory from the instruction cache.
* Parameters are:
* astart start address (register gets clobbered)
* asize size of the region in bytes (register gets clobbered)
* ac unique register used as temporary
*/
.macro icache_unlock_region astart, asize, ac
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
// Instruction cache region unlock:
cache_hit_region ihu, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
icache_sync \ac
// End of instruction cache region unlock
#endif
.endm
/*
* Unlock entire instruction cache.
*
* Parameters:
* aa, ab unique address registers (temporaries)
*/
.macro icache_unlock_all aa, ab, loopokay=1
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
// Instruction cache unlock:
cache_index_all iiu, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, 1, \aa, \ab, \loopokay
icache_sync \aa
// End of instruction cache unlock
#endif
.endm
/*************************** DATA CACHE ***************************/
/*
* Reset/initialize the data cache by simply invalidating it
* (need to unlock first also, if cache locking implemented):
*
* Parameters:
* aa, ab unique address registers (temporaries)
*/
.macro dcache_reset aa, ab, loopokay=0
dcache_unlock_all \aa, \ab, \loopokay
dcache_invalidate_all \aa, \ab, \loopokay
.endm
/*
* Synchronize after a data cache operation,
* to be sure everything is in sync with memory as to be
* expected following any previous data cache control operations.
*
* Parameters are:
* ar an address register (temporary) (currently unused, but may be used in future)
*/
.macro dcache_sync ar
#if XCHAL_DCACHE_SIZE > 0
// The commented-out sequence below errs on the conservative side (too much so); a DSYNC should be sufficient:
//memw // synchronize data cache changes relative to subsequent memory accesses
//isync // be conservative and ISYNC as well (just to be sure)
dsync
#endif
.endm
/*
* Opt into cache coherence.
*
* Parameters are:
* ar,at two scratch address registers (both clobbered)
*/
.macro cache_coherence_on ar at
#if XCHAL_HAVE_EXTERN_REGS && XCHAL_DCACHE_IS_COHERENT
movi \ar, 1
movi \at, XER_CCON
wer \ar, \at
extw
#endif
.endm
/*
* Opt out of cache coherence.
* NOTE: this is generally preceded by emptying the cache;
* see xthal_cache_coherence_optout() in hal/coherence.c for details.
*
* Parameters are:
* ar,at two scratch address registers (both clobbered)
*/
.macro cache_coherence_off ar at
#if XCHAL_HAVE_EXTERN_REGS && XCHAL_DCACHE_IS_COHERENT
extw
movi \at, 0
movi \ar, XER_CCON
wer \at, \ar
extw
#endif
.endm
/*
* Synchronize after a data store operation,
* to be sure the stored data is completely off the processor
* (and assuming there is no buffering outside the processor,
* that the data is in memory). This may be required to
* ensure that the processor's write buffers are emptied.
* A MEMW followed by a read guarantees this, by definition.
* We also try to make sure the read itself completes.
*
* Parameters are:
* ar an address register (temporary)
*/
.macro write_sync ar
memw // ensure previous memory accesses are complete prior to subsequent memory accesses
l32i \ar, sp, 0 // completing this read ensures any previous write has completed, because of MEMW
//slot
add \ar, \ar, \ar // use the result of the read to help ensure the read completes (in future architectures)
.endm
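/*
 * Usage sketch (illustrative only, not part of the original header): ensure a
 * just-stored descriptor word has left the write buffer before notifying
 * other hardware that reads it (the a4/a5/a6 register choice is an
 * assumption; note the macro loads through sp, so sp must be valid):
 *
 *	s32i	a4, a5, 0		// store the descriptor word
 *	write_sync	a6		// a6 is a scratch register
 */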
/*
* Invalidate a single line of the data cache.
* Parameters are:
* ar address register that contains (virtual) address to invalidate
* (may get clobbered in a future implementation, but not currently)
* offset (optional) offset to add to \ar to compute effective address to invalidate
* (note: some number of lsbits are ignored)
*/
.macro dcache_invalidate_line ar, offset
#if XCHAL_DCACHE_SIZE > 0
dhi \ar, \offset
dcache_sync \ar
#endif
.endm
/*
* Invalidate data cache entries that cache a specified portion of memory.
* Parameters are:
* astart start address (register gets clobbered)
* asize size of the region in bytes (register gets clobbered)
* ac unique register used as temporary
*/
.macro dcache_invalidate_region astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0
// Data cache region invalidation:
cache_hit_region dhi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
dcache_sync \ac
// End of data cache region invalidation
#endif
.endm
#if 0
/*
* This is a work-around for a bug in SiChip1.
* To enable the work-around, uncomment this and replace 'dii'
* with 'dii_s1' everywhere, eg. in the dcache_invalidate_all
* macro below.
*/
.macro dii_s1 ar, offset
dii \ar, \offset
or \ar, \ar, \ar
or \ar, \ar, \ar
or \ar, \ar, \ar
or \ar, \ar, \ar
.endm
#endif
/*
* Invalidate entire data cache.
*
* Parameters:
* aa, ab unique address registers (temporaries)
*/
.macro dcache_invalidate_all aa, ab, loopokay=1
#if XCHAL_DCACHE_SIZE > 0
// Data cache invalidation:
cache_index_all dii, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, XCHAL_DCACHE_WAYS, \aa, \ab, \loopokay, 1020
dcache_sync \aa
// End of data cache invalidation
#endif
.endm
/*
* Writeback a single line of the data cache.
* Parameters are:
* ar address register that contains (virtual) address to writeback
* (may get clobbered in a future implementation, but not currently)
* offset offset to add to \ar to compute effective address to writeback
* (note: some number of lsbits are ignored)
*/
.macro dcache_writeback_line ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
dhwb \ar, \offset
dcache_sync \ar
#endif
.endm
/*
* Writeback dirty data cache entries that cache a specified portion of memory.
* Parameters are:
* astart start address (register gets clobbered)
* asize size of the region in bytes (register gets clobbered)
* ac unique register used as temporary
*/
.macro dcache_writeback_region astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
// Data cache region writeback:
cache_hit_region dhwb, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
dcache_sync \ac
// End of data cache region writeback
#endif
.endm
/*
* Writeback entire data cache.
* Parameters:
* aa, ab unique address registers (temporaries)
*/
.macro dcache_writeback_all aa, ab, loopokay=1
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
// Data cache writeback:
cache_index_all diwb, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab, \loopokay
dcache_sync \aa
// End of data cache writeback
#endif
.endm
/*
* Writeback and invalidate a single line of the data cache.
* Parameters are:
* ar address register that contains (virtual) address to writeback and invalidate
* (may get clobbered in a future implementation, but not currently)
* offset offset to add to \ar to compute effective address to writeback and invalidate
* (note: some number of lsbits are ignored)
*/
.macro dcache_writeback_inv_line ar, offset
#if XCHAL_DCACHE_SIZE > 0
dhwbi \ar, \offset /* writeback and invalidate dcache line */
dcache_sync \ar
#endif
.endm
/*
* Writeback and invalidate data cache entries that cache a specified portion of memory.
* Parameters are:
* astart start address (register gets clobbered)
* asize size of the region in bytes (register gets clobbered)
* ac unique register used as temporary
*/
.macro dcache_writeback_inv_region astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0
// Data cache region writeback and invalidate:
cache_hit_region dhwbi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
dcache_sync \ac
// End of data cache region writeback and invalidate
#endif
.endm
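/*
 * Usage sketch (illustrative only, not part of the original header): before
 * handing a buffer starting at a2 with size a3 to an external bus master,
 * write back and drop the lines that cache it (a2/a3 are clobbered; a4 is a
 * temporary; the register choice is an assumption):
 *
 *	dcache_writeback_inv_region	a2, a3, a4
 */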
/*
* Writeback and invalidate entire data cache.
* Parameters:
* aa, ab unique address registers (temporaries)
*/
.macro dcache_writeback_inv_all aa, ab, loopokay=1
#if XCHAL_DCACHE_SIZE > 0
// Data cache writeback and invalidate:
#if XCHAL_DCACHE_IS_WRITEBACK
cache_index_all diwbi, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab, \loopokay
dcache_sync \aa
#else /*writeback*/
// Data cache does not support writeback, so just invalidate:
dcache_invalidate_all \aa, \ab, \loopokay
#endif /*writeback*/
// End of data cache writeback and invalidate
#endif
.endm
/*
* Lock (prefetch & lock) a single line of the data cache.
*
* Parameters are:
* ar address register that contains (virtual) address to lock
* (may get clobbered in a future implementation, but not currently)
* offset offset to add to \ar to compute effective address to lock
* (note: some number of lsbits are ignored)
*/
.macro dcache_lock_line ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
dpfl \ar, \offset /* prefetch and lock dcache line */
dcache_sync \ar
#endif
.endm
/*
* Lock (prefetch & lock) a specified portion of memory into the data cache.
* Parameters are:
* astart start address (register gets clobbered)
* asize size of the region in bytes (register gets clobbered)
* ac unique register used as temporary
*/
.macro dcache_lock_region astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
// Data cache region lock:
cache_hit_region dpfl, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
dcache_sync \ac
// End of data cache region lock
#endif
.endm
/*
* Unlock a single line of the data cache.
*
* Parameters are:
* ar address register that contains (virtual) address to unlock
* (may get clobbered in a future implementation, but not currently)
* offset offset to add to \ar to compute effective address to unlock
* (note: some number of lsbits are ignored)
*/
.macro dcache_unlock_line ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
dhu \ar, \offset /* unlock dcache line */
dcache_sync \ar
#endif
.endm
/*
* Unlock a specified portion of memory from the data cache.
* Parameters are:
* astart start address (register gets clobbered)
* asize size of the region in bytes (register gets clobbered)
* ac unique register used as temporary
*/
.macro dcache_unlock_region astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
// Data cache region unlock:
cache_hit_region dhu, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
dcache_sync \ac
// End of data cache region unlock
#endif
.endm
/*
* Unlock entire data cache.
*
* Parameters:
* aa, ab unique address registers (temporaries)
*/
.macro dcache_unlock_all aa, ab, loopokay=1
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
// Data cache unlock:
cache_index_all diu, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab, \loopokay
dcache_sync \aa
// End of data cache unlock
#endif
.endm
#endif /*XTENSA_CACHEASM_H*/


@@ -0,0 +1,436 @@
/*
* xtensa/cacheattrasm.h -- assembler-specific CACHEATTR register related definitions
* that depend on CORE configuration
*
* This file is logically part of xtensa/coreasm.h (or perhaps xtensa/cacheasm.h),
* but is kept separate for modularity / compilation-performance.
*/
/*
* Copyright (c) 2001-2009 Tensilica Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef XTENSA_CACHEATTRASM_H
#define XTENSA_CACHEATTRASM_H
#include <xtensa/coreasm.h>
/* Determine whether cache attributes are controlled using eight 512MB entries: */
#define XCHAL_CA_8X512 (XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR \
|| (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY))
/*
* This header file defines assembler macros of the form:
* <x>cacheattr_<func>
* where:
* <x> is 'i', 'd' or absent for instruction, data
* or both caches; and
* <func> indicates the function of the macro.
*
* The following functions are defined:
*
* icacheattr_get
* Reads I-cache CACHEATTR into a2 (clobbers a3-a5).
*
* dcacheattr_get
* Reads D-cache CACHEATTR into a2 (clobbers a3-a5).
* (Note: for configs with a real CACHEATTR register, the
* above two macros are identical.)
*
* cacheattr_set
* Writes both I-cache and D-cache CACHEATTRs from a2 (a3-a8 clobbered).
* Works even when changing one's own code's attributes.
*
* icacheattr_is_enabled label
* Branches to \label if I-cache appears to have been enabled
* (eg. if CACHEATTR contains a cache-enabled attribute).
* (clobbers a2-a5,SAR)
*
* dcacheattr_is_enabled label
* Branches to \label if D-cache appears to have been enabled
* (eg. if CACHEATTR contains a cache-enabled attribute).
* (clobbers a2-a5,SAR)
*
* cacheattr_is_enabled label
* Branches to \label if either I-cache or D-cache appears to have been enabled
* (eg. if CACHEATTR contains a cache-enabled attribute).
* (clobbers a2-a5,SAR)
*
* The following macros are only defined under certain conditions:
*
* icacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR)
* Writes I-cache CACHEATTR from a2 (a3-a8 clobbered).
*
* dcacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR)
* Writes D-cache CACHEATTR from a2 (a3-a8 clobbered).
*/
/*************************** GENERIC -- ALL CACHES ***************************/
/*
* _cacheattr_get
*
* (Internal macro.)
* Returns value of CACHEATTR register (or closest equivalent) in a2.
*
* Entry:
* (none)
* Exit:
* a2 value read from CACHEATTR
* a3-a5 clobbered (temporaries)
*/
.macro _cacheattr_get tlb
#if XCHAL_HAVE_CACHEATTR
rsr a2, CACHEATTR
#elif XCHAL_CA_8X512
// We have a config that "mimics" CACHEATTR using a simplified
// "MMU" composed of a single statically-mapped way.
// DTLB and ITLB are independent, so there's no single
// cache attribute that can describe both. So for now
// just return the DTLB state.
movi a5, 0xE0000000
movi a2, 0
movi a3, XCHAL_SPANNING_WAY
1: add a3, a3, a5 // next segment
r&tlb&1 a4, a3 // get PPN+CA of segment at 0xE0000000, 0xC0000000, ..., 0
dsync // interlock???
slli a2, a2, 4
extui a4, a4, 0, 4 // extract CA
or a2, a2, a4
bgeui a3, 16, 1b
#else
// This macro isn't applicable to arbitrary MMU configurations.
// Just return zero.
movi a2, 0
#endif
.endm
.macro icacheattr_get
_cacheattr_get itlb
.endm
.macro dcacheattr_get
_cacheattr_get dtlb
.endm
/* Default (powerup/reset) value of CACHEATTR,
all BYPASS mode (ie. disabled/bypassed caches): */
#if XCHAL_HAVE_PTP_MMU
# define XCHAL_CACHEATTR_ALL_BYPASS 0x33333333
#else
# define XCHAL_CACHEATTR_ALL_BYPASS 0x22222222
#endif
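/*
 * Illustrative note (not part of the original header): CACHEATTR packs one
 * 4-bit attribute per 512MB region, from bits [3:0] covering
 * 0x00000000..0x1FFFFFFF up through bits [31:28] covering
 * 0xE0000000..0xFFFFFFFF, so the all-bypass value simply repeats the bypass
 * attribute in every nibble. For example, on a non-PTP-MMU config, setting
 * only region 0 to some attribute A while leaving the rest bypassed would
 * be written as 0x2222222A.
 */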
#if XCHAL_CA_8X512
#if XCHAL_HAVE_PTP_MMU
# define XCHAL_FCA_ENAMASK 0x0AA0 /* bitmap of fetch attributes that require enabled icache */
# define XCHAL_LCA_ENAMASK 0x0FF0 /* bitmap of load attributes that require enabled dcache */
# define XCHAL_SCA_ENAMASK 0x0CC0 /* bitmap of store attributes that require enabled dcache */
#else
# define XCHAL_FCA_ENAMASK 0x003A /* bitmap of fetch attributes that require enabled icache */
# define XCHAL_LCA_ENAMASK 0x0033 /* bitmap of load attributes that require enabled dcache */
# define XCHAL_SCA_ENAMASK 0x0033 /* bitmap of store attributes that require enabled dcache */
#endif
#define XCHAL_LSCA_ENAMASK (XCHAL_LCA_ENAMASK|XCHAL_SCA_ENAMASK) /* l/s attrs requiring enabled dcache */
#define XCHAL_ALLCA_ENAMASK (XCHAL_FCA_ENAMASK|XCHAL_LSCA_ENAMASK) /* all attrs requiring enabled caches */
/*
* _cacheattr_is_enabled
*
* (Internal macro.)
* Branches to \label if CACHEATTR in a2 indicates an enabled
* cache, using mask in a3.
*
* Parameters:
* label where to branch to if cache is enabled
* Entry:
* a2 contains CACHEATTR value used to determine whether
* caches are enabled
* a3 16-bit constant where each bit corresponds to
* one of the 16 possible CA values (in a CACHEATTR mask);
* CA values that indicate the cache is enabled
* have their corresponding bit set in this mask
* (eg. use XCHAL_xCA_ENAMASK , above)
* Exit:
* a2,a4,a5 clobbered
* SAR clobbered
*/
.macro _cacheattr_is_enabled label
movi a4, 8 // loop 8 times
.Lcaife\@:
extui a5, a2, 0, 4 // get CA nibble
ssr a5 // index into mask according to CA...
srl a5, a3 // ...and get CA's mask bit in a5 bit 0
bbsi.l a5, 0, \label // if CA indicates cache enabled, jump to label
srli a2, a2, 4 // next nibble
addi a4, a4, -1
bnez a4, .Lcaife\@ // loop for each nibble
.endm
#else /* XCHAL_CA_8X512 */
.macro _cacheattr_is_enabled label
j \label // macro not applicable, assume caches always enabled
.endm
#endif /* XCHAL_CA_8X512 */
/*
* icacheattr_is_enabled
*
* Branches to \label if I-cache is enabled.
*
* Parameters:
* label where to branch to if icache is enabled
* Entry:
* (none)
* Exit:
* a2-a5, SAR clobbered (temporaries)
*/
.macro icacheattr_is_enabled label
#if XCHAL_CA_8X512
icacheattr_get
movi a3, XCHAL_FCA_ENAMASK
#endif
_cacheattr_is_enabled \label
.endm
/*
* dcacheattr_is_enabled
*
* Branches to \label if D-cache is enabled.
*
* Parameters:
* label where to branch to if dcache is enabled
* Entry:
* (none)
* Exit:
* a2-a5, SAR clobbered (temporaries)
*/
.macro dcacheattr_is_enabled label
#if XCHAL_CA_8X512
dcacheattr_get
movi a3, XCHAL_LSCA_ENAMASK
#endif
_cacheattr_is_enabled \label
.endm
/*
* cacheattr_is_enabled
*
* Branches to \label if either I-cache or D-cache is enabled.
*
* Parameters:
* label where to branch to if a cache is enabled
* Entry:
* (none)
* Exit:
* a2-a5, SAR clobbered (temporaries)
*/
.macro cacheattr_is_enabled label
#if XCHAL_HAVE_CACHEATTR
rsr a2, CACHEATTR
movi a3, XCHAL_ALLCA_ENAMASK
#elif XCHAL_CA_8X512
icacheattr_get
movi a3, XCHAL_FCA_ENAMASK
_cacheattr_is_enabled \label
dcacheattr_get
movi a3, XCHAL_LSCA_ENAMASK
#endif
_cacheattr_is_enabled \label
.endm
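/*
 * Usage sketch (illustrative only, not part of the original header): skip a
 * cache flush when no cache is enabled (the label names are assumptions;
 * a2-a5 and SAR are clobbered):
 *
 *	cacheattr_is_enabled	.Lcache_on
 *	j	.Lno_cache
 * .Lcache_on:
 *	// ... writeback/invalidate sequence here ...
 * .Lno_cache:
 */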
/*
* The ISA does not have a defined way to change the
* instruction cache attributes of the running code,
* ie. of the memory area that encloses the current PC.
* However, each micro-architecture (or class of
* configurations within a micro-architecture)
* provides a way to deal with this issue.
*
* Here are a few macros used to implement the relevant
* approach taken.
*/
#if XCHAL_CA_8X512 && !XCHAL_HAVE_CACHEATTR
// We have a config that "mimics" CACHEATTR using a simplified
// "MMU" composed of a single statically-mapped way.
/*
* icacheattr_set
*
* Entry:
* a2 cacheattr value to set
* Exit:
* a2 unchanged
* a3-a8 clobbered (temporaries)
*/
.macro icacheattr_set
movi a5, 0xE0000000 // mask of upper 3 bits
movi a6, 3f // PC where ITLB is set
movi a3, XCHAL_SPANNING_WAY // start at region 0 (0 .. 7)
mov a7, a2 // copy a2 so it doesn't get clobbered
and a6, a6, a5 // upper 3 bits of local PC area
j 3f
// Use micro-architecture specific method.
// The following 4-instruction sequence is aligned such that
// it all fits within a single I-cache line. Sixteen byte
// alignment is sufficient for this (using XCHAL_ICACHE_LINESIZE
// actually causes problems because that can be greater than
// the alignment of the reset vector, where this macro is often
// invoked, which would cause the linker to align the reset
// vector code away from the reset vector!!).
.begin no-transform
.align 16 /*XCHAL_ICACHE_LINESIZE*/
1: witlb a4, a3 // write wired PTE (CA, no PPN) of 512MB segment to ITLB
isync
.end no-transform
nop
nop
sub a3, a3, a5 // next segment (add 0x20000000)
bltui a3, 16, 4f // done?
// Note that in the WITLB loop, we don't do any load/stores
// (may not be an issue here, but it is important in the DTLB case).
2: srli a7, a7, 4 // next CA
3:
# if XCHAL_HAVE_MIMIC_CACHEATTR
extui a4, a7, 0, 4 // extract CA to set
# else /* have translation, preserve it: */
ritlb1 a8, a3 // get current PPN+CA of segment
//dsync // interlock???
extui a4, a7, 0, 4 // extract CA to set
srli a8, a8, 4 // clear CA but keep PPN ...
slli a8, a8, 4 // ...
add a4, a4, a8 // combine new CA with PPN to preserve
# endif
beq a3, a6, 1b // current PC's region? if so, do it in a safe way
witlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to ITLB
sub a3, a3, a5 // next segment (add 0x20000000)
bgeui a3, 16, 2b
isync // make sure all ifetch changes take effect
4:
.endm // icacheattr_set
/*
* dcacheattr_set
*
* Entry:
* a2 cacheattr value to set
* Exit:
* a2 unchanged
* a3-a8 clobbered (temporaries)
*/
.macro dcacheattr_set
movi a5, 0xE0000000 // mask of upper 3 bits
movi a3, XCHAL_SPANNING_WAY // start at region 0 (0 .. 7)
mov a7, a2 // copy a2 so it doesn't get clobbered
// Note that in the WDTLB loop, we don't do any load/stores
2: // (including implicit l32r via movi) because it isn't safe.
# if XCHAL_HAVE_MIMIC_CACHEATTR
extui a4, a7, 0, 4 // extract CA to set
# else /* have translation, preserve it: */
rdtlb1 a8, a3 // get current PPN+CA of segment
//dsync // interlock???
extui a4, a7, 0, 4 // extract CA to set
srli a8, a8, 4 // clear CA but keep PPN ...
slli a8, a8, 4 // ...
add a4, a4, a8 // combine new CA with PPN to preserve
# endif
wdtlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to DTLB
sub a3, a3, a5 // next segment (add 0x20000000)
srli a7, a7, 4 // next CA
bgeui a3, 16, 2b
dsync // make sure all data path changes take effect
.endm // dcacheattr_set
#endif /* XCHAL_CA_8X512 && !XCHAL_HAVE_CACHEATTR */
/*
* cacheattr_set
*
* Macro that sets the current CACHEATTR safely
* (both i and d) according to the current contents of a2.
* It works even when changing the cache attributes of
* the currently running code.
*
* Entry:
* a2 cacheattr value to set
* Exit:
* a2 unchanged
* a3-a8 clobbered (temporaries)
*/
.macro cacheattr_set
#if XCHAL_HAVE_CACHEATTR
# if XCHAL_ICACHE_LINESIZE < 4
// No i-cache, so can always safely write to CACHEATTR:
wsr a2, CACHEATTR
# else
// The Athens micro-architecture, when using the old
// exception architecture option (ie. with the CACHEATTR register)
// allows changing the cache attributes of the running code
// using the following exact sequence aligned to be within
// an instruction cache line. (NOTE: using XCHAL_ICACHE_LINESIZE
// alignment actually causes problems because that can be greater
// than the alignment of the reset vector, where this macro is often
// invoked, which would cause the linker to align the reset
// vector code away from the reset vector!!).
j 1f
.begin no-transform
.align 16 /*XCHAL_ICACHE_LINESIZE*/ // align to within an I-cache line
1: wsr a2, CACHEATTR
isync
.end no-transform
nop
nop
# endif
#elif XCHAL_CA_8X512
// DTLB and ITLB are independent, but to keep semantics
// of this macro we simply write to both.
icacheattr_set
dcacheattr_set
#else
// This macro isn't applicable to arbitrary MMU configurations.
// Do nothing in this case.
#endif
.endm
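/*
 * Usage sketch (illustrative only, not part of the original header): put
 * every region into bypass (cache-disabled) mode, e.g. early in a reset
 * handler; a2 holds the value and a3-a8 get clobbered:
 *
 *	movi	a2, XCHAL_CACHEATTR_ALL_BYPASS
 *	cacheattr_set
 */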
#endif /*XTENSA_CACHEATTRASM_H*/


@@ -0,0 +1,459 @@
/*
* xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
* processor CORE configuration
*
* See <xtensa/config/core.h>, which includes this file, for more details.
*/
/* Xtensa processor core configuration information.
Customer ID=7011; Build=0x2b6f6; Copyright (c) 1999-2010 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef _XTENSA_CORE_CONFIGURATION_H
#define _XTENSA_CORE_CONFIGURATION_H
/****************************************************************************
Parameters Useful for Any Code, USER or PRIVILEGED
****************************************************************************/
/*
* Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
* configured, and a value of 0 otherwise. These macros are always defined.
*/
/*----------------------------------------------------------------------
ISA
----------------------------------------------------------------------*/
#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
#define XCHAL_HAVE_WINDOWED 0 /* windowed registers option */
#define XCHAL_NUM_AREGS 16 /* num of physical addr regs */
#define XCHAL_NUM_AREGS_LOG2 4 /* log2(XCHAL_NUM_AREGS) */
#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
#define XCHAL_HAVE_DEBUG 1 /* debug option */
#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
#define XCHAL_HAVE_LOOPS 0 /* zero-overhead loops */
#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
#define XCHAL_HAVE_MINMAX 0 /* MIN/MAX instructions */
#define XCHAL_HAVE_SEXT 0 /* SEXT instruction */
#define XCHAL_HAVE_CLAMPS 0 /* CLAMPS instruction */
#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
#define XCHAL_HAVE_DIV32 0 /* QUOS/QUOU/REMS/REMU instructions */
#define XCHAL_HAVE_L32R 1 /* L32R instruction */
#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
#define XCHAL_HAVE_CALL4AND12 0 /* (obsolete option) */
#define XCHAL_HAVE_ABS 1 /* ABS instruction */
/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
#define XCHAL_HAVE_RELEASE_SYNC 0 /* L32AI/S32RI instructions */
#define XCHAL_HAVE_S32C1I 0 /* S32C1I instruction */
#define XCHAL_HAVE_SPECULATION 0 /* speculation */
#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
#define XCHAL_NUM_CONTEXTS 1 /* */
#define XCHAL_NUM_MISC_REGS 0 /* num of scratch regs (0..4) */
#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
#define XCHAL_HAVE_PRID 1 /* processor ID register */
#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
#define XCHAL_HAVE_THREADPTR 0 /* THREADPTR register */
#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */
#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */
#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
#define XCHAL_HAVE_FP 0 /* floating point pkg */
#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
/*----------------------------------------------------------------------
MISC
----------------------------------------------------------------------*/
#define XCHAL_NUM_WRITEBUFFER_ENTRIES 1 /* size of write buffer */
#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
/* In T1050, applies to selected core load and store instructions (see ISA): */
#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
#define XCHAL_SW_VERSION 800001 /* sw version of this header */
#define XCHAL_CORE_ID "lx106" /* alphanum core name
(CoreID) set in the Xtensa
Processor Generator */
#define XCHAL_BUILD_UNIQUE_ID 0x0002B6F6 /* 22-bit sw build ID */
/*
* These definitions describe the hardware targeted by this software.
*/
#define XCHAL_HW_CONFIGID0 0xC28CDAFA /* ConfigID hi 32 bits*/
#define XCHAL_HW_CONFIGID1 0x1082B6F6 /* ConfigID lo 32 bits*/
#define XCHAL_HW_VERSION_NAME "LX3.0.1" /* full version name */
#define XCHAL_HW_VERSION_MAJOR 2300 /* major ver# of targeted hw */
#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
#define XCHAL_HW_VERSION 230001 /* major*100+minor */
#define XCHAL_HW_REL_LX3 1
#define XCHAL_HW_REL_LX3_0 1
#define XCHAL_HW_REL_LX3_0_1 1
#define XCHAL_HW_CONFIGID_RELIABLE 1
/* If software targets a *range* of hardware versions, these are the bounds: */
#define XCHAL_HW_MIN_VERSION_MAJOR 2300 /* major v of earliest tgt hw */
#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
#define XCHAL_HW_MIN_VERSION 230001 /* earliest targeted hw */
#define XCHAL_HW_MAX_VERSION_MAJOR 2300 /* major v of latest tgt hw */
#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
#define XCHAL_HW_MAX_VERSION 230001 /* latest targeted hw */
/*----------------------------------------------------------------------
CACHE
----------------------------------------------------------------------*/
#define XCHAL_ICACHE_LINESIZE 4 /* I-cache line size in bytes */
#define XCHAL_DCACHE_LINESIZE 4 /* D-cache line size in bytes */
#define XCHAL_ICACHE_LINEWIDTH 2 /* log2(I line size in bytes) */
#define XCHAL_DCACHE_LINEWIDTH 2 /* log2(D line size in bytes) */
#define XCHAL_ICACHE_SIZE 0 /* I-cache size in bytes or 0 */
#define XCHAL_DCACHE_SIZE 0 /* D-cache size in bytes or 0 */
#define XCHAL_DCACHE_IS_WRITEBACK 0 /* writeback feature */
#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
/****************************************************************************
Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
****************************************************************************/
#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
/*----------------------------------------------------------------------
CACHE
----------------------------------------------------------------------*/
#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
/* Number of cache sets in log2(lines per way): */
#define XCHAL_ICACHE_SETWIDTH 0
#define XCHAL_DCACHE_SETWIDTH 0
/* Cache set associativity (number of ways): */
#define XCHAL_ICACHE_WAYS 1
#define XCHAL_DCACHE_WAYS 1
/* Cache features: */
#define XCHAL_ICACHE_LINE_LOCKABLE 0
#define XCHAL_DCACHE_LINE_LOCKABLE 0
#define XCHAL_ICACHE_ECC_PARITY 0
#define XCHAL_DCACHE_ECC_PARITY 0
/* Cache access size in bytes (affects operation of SICW instruction): */
#define XCHAL_ICACHE_ACCESS_SIZE 1
#define XCHAL_DCACHE_ACCESS_SIZE 1
/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
#define XCHAL_CA_BITS 4
/*----------------------------------------------------------------------
INTERNAL I/D RAM/ROMs and XLMI
----------------------------------------------------------------------*/
#define XCHAL_NUM_INSTROM 1 /* number of core instr. ROMs */
#define XCHAL_NUM_INSTRAM 2 /* number of core instr. RAMs */
#define XCHAL_NUM_DATAROM 1 /* number of core data ROMs */
#define XCHAL_NUM_DATARAM 2 /* number of core data RAMs */
#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
#define XCHAL_NUM_XLMI 1 /* number of core XLMI ports */
/* Instruction ROM 0: */
#define XCHAL_INSTROM0_VADDR 0x40200000
#define XCHAL_INSTROM0_PADDR 0x40200000
#define XCHAL_INSTROM0_SIZE 1048576
#define XCHAL_INSTROM0_ECC_PARITY 0
/* Instruction RAM 0: */
#define XCHAL_INSTRAM0_VADDR 0x40000000
#define XCHAL_INSTRAM0_PADDR 0x40000000
#define XCHAL_INSTRAM0_SIZE 1048576
#define XCHAL_INSTRAM0_ECC_PARITY 0
/* Instruction RAM 1: */
#define XCHAL_INSTRAM1_VADDR 0x40100000
#define XCHAL_INSTRAM1_PADDR 0x40100000
#define XCHAL_INSTRAM1_SIZE 1048576
#define XCHAL_INSTRAM1_ECC_PARITY 0
/* Data ROM 0: */
#define XCHAL_DATAROM0_VADDR 0x3FF40000
#define XCHAL_DATAROM0_PADDR 0x3FF40000
#define XCHAL_DATAROM0_SIZE 262144
#define XCHAL_DATAROM0_ECC_PARITY 0
/* Data RAM 0: */
#define XCHAL_DATARAM0_VADDR 0x3FFC0000
#define XCHAL_DATARAM0_PADDR 0x3FFC0000
#define XCHAL_DATARAM0_SIZE 262144
#define XCHAL_DATARAM0_ECC_PARITY 0
/* Data RAM 1: */
#define XCHAL_DATARAM1_VADDR 0x3FF80000
#define XCHAL_DATARAM1_PADDR 0x3FF80000
#define XCHAL_DATARAM1_SIZE 262144
#define XCHAL_DATARAM1_ECC_PARITY 0
/* XLMI Port 0: */
#define XCHAL_XLMI0_VADDR 0x3FF00000
#define XCHAL_XLMI0_PADDR 0x3FF00000
#define XCHAL_XLMI0_SIZE 262144
#define XCHAL_XLMI0_ECC_PARITY 0
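/* Illustrative note (not part of the original header): each local memory
   above spans [VADDR, VADDR + SIZE), eg. Instruction RAM 0 covers
   0x40000000..0x400FFFFF (1MB) and Data RAM 0 covers 0x3FFC0000..0x3FFFFFFF
   (256KB); VADDR == PADDR throughout since XCHAL_HAVE_IDENTITY_MAP is 1. */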
/*----------------------------------------------------------------------
INTERRUPTS and TIMERS
----------------------------------------------------------------------*/
#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
#define XCHAL_NUM_TIMERS 1 /* number of CCOMPAREn regs */
#define XCHAL_NUM_INTERRUPTS 15 /* number of interrupts */
#define XCHAL_NUM_INTERRUPTS_LOG2 4 /* ceil(log2(NUM_INTERRUPTS)) */
#define XCHAL_NUM_EXTINTERRUPTS 13 /* num of external interrupts */
#define XCHAL_NUM_INTLEVELS 2 /* number of interrupt levels
(not including level zero) */
#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */
/* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
/* Masks of interrupts at each interrupt level: */
#define XCHAL_INTLEVEL1_MASK 0x00003FFF
#define XCHAL_INTLEVEL2_MASK 0x00000000
#define XCHAL_INTLEVEL3_MASK 0x00004000
#define XCHAL_INTLEVEL4_MASK 0x00000000
#define XCHAL_INTLEVEL5_MASK 0x00000000
#define XCHAL_INTLEVEL6_MASK 0x00000000
#define XCHAL_INTLEVEL7_MASK 0x00000000
/* Masks of interrupts at each range 1..n of interrupt levels: */
#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x00003FFF
#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x00003FFF
#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x00007FFF
#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x00007FFF
#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x00007FFF
#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x00007FFF
#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x00007FFF
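/* Illustrative note (not part of the original header): these masks follow
   from the per-interrupt levels listed below, eg. XCHAL_INTLEVEL3_MASK ==
   (1 << 14) because interrupt 14 (the NMI) is the only level-3 interrupt,
   and XCHAL_INTLEVEL2_ANDBELOW_MASK == XCHAL_INTLEVEL1_MASK because no
   interrupt is configured at level 2. */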
/* Level of each interrupt: */
#define XCHAL_INT0_LEVEL 1
#define XCHAL_INT1_LEVEL 1
#define XCHAL_INT2_LEVEL 1
#define XCHAL_INT3_LEVEL 1
#define XCHAL_INT4_LEVEL 1
#define XCHAL_INT5_LEVEL 1
#define XCHAL_INT6_LEVEL 1
#define XCHAL_INT7_LEVEL 1
#define XCHAL_INT8_LEVEL 1
#define XCHAL_INT9_LEVEL 1
#define XCHAL_INT10_LEVEL 1
#define XCHAL_INT11_LEVEL 1
#define XCHAL_INT12_LEVEL 1
#define XCHAL_INT13_LEVEL 1
#define XCHAL_INT14_LEVEL 3
#define XCHAL_DEBUGLEVEL 2 /* debug interrupt level */
#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
#define XCHAL_NMILEVEL 3 /* NMI "level" (for use with
EXCSAVE/EPS/EPC_n, RFI n) */
/* Type of each interrupt: */
#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT10_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT11_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT13_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
/* Masks of interrupts for each type of interrupt: */
#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFF8000
#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000080
#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00003F00
#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000003F
#define XCHAL_INTTYPE_MASK_TIMER 0x00000040
#define XCHAL_INTTYPE_MASK_NMI 0x00004000
#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
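/* Illustrative example (not part of the original header): the per-type masks
   can be used to classify a core interrupt number, eg.:

	#define INT_IS_EDGE(n)	(((XCHAL_INTTYPE_MASK_EXTERN_EDGE) >> (n)) & 1)
	#define INT_IS_TIMER(n)	(((XCHAL_INTTYPE_MASK_TIMER) >> (n)) & 1)

   With this configuration, INT_IS_EDGE(8) and INT_IS_TIMER(6) both evaluate
   to 1. The macro names are assumptions, not part of the Xtensa HAL. */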
/* Interrupt numbers assigned to specific interrupt sources: */
#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
#define XCHAL_TIMER1_INTERRUPT XTHAL_TIMER_UNCONFIGURED
#define XCHAL_TIMER2_INTERRUPT XTHAL_TIMER_UNCONFIGURED
#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
/* Interrupt numbers for levels at which only one interrupt is configured: */
#define XCHAL_INTLEVEL3_NUM 14
/* (Multiple interrupts are configured at level 1.) */
/*
* External interrupt vectors/levels.
* These macros describe how Xtensa processor interrupt numbers
* (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
* map to external BInterrupt<n> pins, for those interrupts
* configured as external (level-triggered, edge-triggered, or NMI).
* See the Xtensa processor databook for more details.
*/
/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
#define XCHAL_EXTINT6_NUM 8 /* (intlevel 1) */
#define XCHAL_EXTINT7_NUM 9 /* (intlevel 1) */
#define XCHAL_EXTINT8_NUM 10 /* (intlevel 1) */
#define XCHAL_EXTINT9_NUM 11 /* (intlevel 1) */
#define XCHAL_EXTINT10_NUM 12 /* (intlevel 1) */
#define XCHAL_EXTINT11_NUM 13 /* (intlevel 1) */
#define XCHAL_EXTINT12_NUM 14 /* (intlevel 3) */
/*----------------------------------------------------------------------
EXCEPTIONS and VECTORS
----------------------------------------------------------------------*/
#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
number: 1 == XEA1 (old)
2 == XEA2 (new)
0 == XEAX (extern) */
#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
#define XCHAL_VECBASE_RESET_VADDR 0x40000000 /* VECBASE reset value */
#define XCHAL_VECBASE_RESET_PADDR 0x40000000
#define XCHAL_RESET_VECBASE_OVERLAP 0
#define XCHAL_RESET_VECTOR0_VADDR 0x50000000
#define XCHAL_RESET_VECTOR0_PADDR 0x50000000
#define XCHAL_RESET_VECTOR1_VADDR 0x40000080
#define XCHAL_RESET_VECTOR1_PADDR 0x40000080
#define XCHAL_RESET_VECTOR_VADDR 0x50000000
#define XCHAL_RESET_VECTOR_PADDR 0x50000000
#define XCHAL_USER_VECOFS 0x00000050
#define XCHAL_USER_VECTOR_VADDR 0x40000050
#define XCHAL_USER_VECTOR_PADDR 0x40000050
#define XCHAL_KERNEL_VECOFS 0x00000030
#define XCHAL_KERNEL_VECTOR_VADDR 0x40000030
#define XCHAL_KERNEL_VECTOR_PADDR 0x40000030
#define XCHAL_DOUBLEEXC_VECOFS 0x00000070
#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x40000070
#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x40000070
#define XCHAL_INTLEVEL2_VECOFS 0x00000010
#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x40000010
#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x40000010
#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL2_VECOFS
#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL2_VECTOR_PADDR
#define XCHAL_NMI_VECOFS 0x00000020
#define XCHAL_NMI_VECTOR_VADDR 0x40000020
#define XCHAL_NMI_VECTOR_PADDR 0x40000020
#define XCHAL_INTLEVEL3_VECOFS XCHAL_NMI_VECOFS
#define XCHAL_INTLEVEL3_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
#define XCHAL_INTLEVEL3_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
/*----------------------------------------------------------------------
DEBUG
----------------------------------------------------------------------*/
#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
#define XCHAL_NUM_IBREAK 1 /* number of IBREAKn regs */
#define XCHAL_NUM_DBREAK 1 /* number of DBREAKn regs */
#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option */
/*----------------------------------------------------------------------
MMU
----------------------------------------------------------------------*/
/* See core-matmap.h header file for more details. */
#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
#define XCHAL_SPANNING_WAY 0 /* TLB spanning way number */
#define XCHAL_HAVE_IDENTITY_MAP 1 /* vaddr == paddr always */
#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
#define XCHAL_HAVE_MIMIC_CACHEATTR 1 /* region protection */
#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
#define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table
[autorefill] and protection)
usable for an MMU-based OS */
/* If none of the last four options above is set, this is a custom TLB configuration. */
#define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */
#define XCHAL_MMU_RINGS 1 /* number of rings (1..4) */
#define XCHAL_MMU_RING_BITS 0 /* num of bits in RING field */
#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
#endif /* _XTENSA_CORE_CONFIGURATION_H */


@@ -0,0 +1,316 @@
/*
* xtensa/config/core-matmap.h -- Memory access and translation mapping
* parameters (CHAL) of the Xtensa processor core configuration.
*
* If you are using Xtensa Tools, see <xtensa/config/core.h> (which includes
* this file) for more details.
*
* In the Xtensa processor products released to date, all parameters
* defined in this file are derivable (at least in theory) from
* information contained in the core-isa.h header file.
* In particular, the following core configuration parameters are relevant:
* XCHAL_HAVE_CACHEATTR
* XCHAL_HAVE_MIMIC_CACHEATTR
* XCHAL_HAVE_XLT_CACHEATTR
* XCHAL_HAVE_PTP_MMU
* XCHAL_ITLB_ARF_ENTRIES_LOG2
* XCHAL_DTLB_ARF_ENTRIES_LOG2
* XCHAL_DCACHE_IS_WRITEBACK
* XCHAL_ICACHE_SIZE (presence of I-cache)
* XCHAL_DCACHE_SIZE (presence of D-cache)
* XCHAL_HW_VERSION_MAJOR
* XCHAL_HW_VERSION_MINOR
*/
/* Customer ID=7011; Build=0x2b6f6; Copyright (c) 1999-2010 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef XTENSA_CONFIG_CORE_MATMAP_H
#define XTENSA_CONFIG_CORE_MATMAP_H
/*----------------------------------------------------------------------
CACHE (MEMORY ACCESS) ATTRIBUTES
----------------------------------------------------------------------*/
/* Cache Attribute encodings -- lists of access modes for each cache attribute: */
#define XCHAL_FCA_LIST XTHAL_FAM_EXCEPTION XCHAL_SEP \
XTHAL_FAM_BYPASS XCHAL_SEP \
XTHAL_FAM_BYPASS XCHAL_SEP \
XTHAL_FAM_BYPASS XCHAL_SEP \
XTHAL_FAM_BYPASS XCHAL_SEP \
XTHAL_FAM_BYPASS XCHAL_SEP \
XTHAL_FAM_EXCEPTION XCHAL_SEP \
XTHAL_FAM_EXCEPTION XCHAL_SEP \
XTHAL_FAM_EXCEPTION XCHAL_SEP \
XTHAL_FAM_EXCEPTION XCHAL_SEP \
XTHAL_FAM_EXCEPTION XCHAL_SEP \
XTHAL_FAM_EXCEPTION XCHAL_SEP \
XTHAL_FAM_EXCEPTION XCHAL_SEP \
XTHAL_FAM_EXCEPTION XCHAL_SEP \
XTHAL_FAM_EXCEPTION XCHAL_SEP \
XTHAL_FAM_EXCEPTION
#define XCHAL_LCA_LIST XTHAL_LAM_BYPASSG XCHAL_SEP \
XTHAL_LAM_BYPASSG XCHAL_SEP \
XTHAL_LAM_BYPASSG XCHAL_SEP \
XTHAL_LAM_EXCEPTION XCHAL_SEP \
XTHAL_LAM_BYPASSG XCHAL_SEP \
XTHAL_LAM_BYPASSG XCHAL_SEP \
XTHAL_LAM_EXCEPTION XCHAL_SEP \
XTHAL_LAM_EXCEPTION XCHAL_SEP \
XTHAL_LAM_EXCEPTION XCHAL_SEP \
XTHAL_LAM_EXCEPTION XCHAL_SEP \
XTHAL_LAM_EXCEPTION XCHAL_SEP \
XTHAL_LAM_EXCEPTION XCHAL_SEP \
XTHAL_LAM_EXCEPTION XCHAL_SEP \
XTHAL_LAM_EXCEPTION XCHAL_SEP \
XTHAL_LAM_BYPASSG XCHAL_SEP \
XTHAL_LAM_EXCEPTION
#define XCHAL_SCA_LIST XTHAL_SAM_BYPASS XCHAL_SEP \
XTHAL_SAM_BYPASS XCHAL_SEP \
XTHAL_SAM_BYPASS XCHAL_SEP \
XTHAL_SAM_EXCEPTION XCHAL_SEP \
XTHAL_SAM_BYPASS XCHAL_SEP \
XTHAL_SAM_BYPASS XCHAL_SEP \
XTHAL_SAM_EXCEPTION XCHAL_SEP \
XTHAL_SAM_EXCEPTION XCHAL_SEP \
XTHAL_SAM_EXCEPTION XCHAL_SEP \
XTHAL_SAM_EXCEPTION XCHAL_SEP \
XTHAL_SAM_EXCEPTION XCHAL_SEP \
XTHAL_SAM_EXCEPTION XCHAL_SEP \
XTHAL_SAM_EXCEPTION XCHAL_SEP \
XTHAL_SAM_EXCEPTION XCHAL_SEP \
XTHAL_SAM_BYPASS XCHAL_SEP \
XTHAL_SAM_EXCEPTION
/*
* Specific encoded cache attribute values of general interest.
* If a specific cache mode is not available, the closest available
* one is returned instead (eg. writethru instead of writeback,
* bypass instead of writethru).
*/
#define XCHAL_CA_BYPASS 2 /* cache disabled (bypassed) mode */
#define XCHAL_CA_WRITETHRU 2 /* cache enabled (write-through) mode */
#define XCHAL_CA_WRITEBACK 2 /* cache enabled (write-back) mode */
#define XCHAL_CA_WRITEBACK_NOALLOC 2 /* cache enabled (write-back no-allocate) mode */
#define XCHAL_CA_BYPASS_RW 0 /* cache disabled (bypassed) mode (no exec) */
#define XCHAL_CA_WRITETHRU_RW 0 /* cache enabled (write-through) mode (no exec) */
#define XCHAL_CA_WRITEBACK_RW 0 /* cache enabled (write-back) mode (no exec) */
#define XCHAL_CA_WRITEBACK_NOALLOC_RW 0 /* cache enabled (write-back no-allocate) mode (no exec) */
#define XCHAL_CA_ILLEGAL 15 /* no access allowed (all cause exceptions) mode */
#define XCHAL_CA_ISOLATE 0 /* cache isolate (accesses go to cache not memory) mode */
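/*
 * Illustrative use of these encodings (a hedged sketch, not configuration
 * data): entry N of the XCHAL_FCA/LCA/SCA_LIST lists above gives the
 * fetch/load/store access modes for CA value N, and a CA value is normally
 * applied to a whole 512MB region through the generic HAL call declared in
 * <xtensa/hal.h> (check your HAL version for the exact prototype):
 *
 *	#include <xtensa/hal.h>
 *	void make_region_uncached(void)
 *	{
 *	    // Put the 512MB region starting at 0x60000000 into bypass mode:
 *	    (void) xthal_set_region_attribute((void *)0x60000000, 0x20000000,
 *	                                      XCHAL_CA_BYPASS, 0);
 *	}
 */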
/*----------------------------------------------------------------------
MMU
----------------------------------------------------------------------*/
/*
* General notes on MMU parameters.
*
* Terminology:
* ASID = address-space ID (acts as an "extension" of virtual addresses)
* VPN = virtual page number
* PPN = physical page number
* CA = encoded cache attribute (access modes)
* TLB = translation look-aside buffer (term is stretched somewhat here)
* I = instruction (fetch accesses)
* D = data (load and store accesses)
* way = each TLB (ITLB and DTLB) consists of a number of "ways"
* that simultaneously match the virtual address of an access;
* a TLB successfully translates a virtual address if exactly
* one way matches the vaddr; if none match, it is a miss;
* if multiple match, one gets a "multihit" exception;
* each way can be independently configured in terms of number of
* entries, page sizes, which fields are writable or constant, etc.
* set = group of contiguous ways with exactly identical parameters
* ARF = auto-refill; hardware services a 1st-level miss by loading a PTE
* from the page table and storing it in one of the auto-refill ways;
* if this PTE load also misses, a miss exception is posted for s/w.
* min-wired = a "min-wired" way can be used to map a single (minimum-sized)
* page arbitrarily under program control; it has a single entry,
* is non-auto-refill (some other way(s) must be auto-refill),
* all its fields (VPN, PPN, ASID, CA) are all writable, and it
* supports the XCHAL_MMU_MIN_PTE_PAGE_SIZE page size (a current
* restriction is that this be the only page size it supports).
*
* TLB way entries are virtually indexed.
* TLB ways that support multiple page sizes:
* - must have all writable VPN and PPN fields;
* - can only use one page size at any given time (eg. setup at startup),
* selected by the respective ITLBCFG or DTLBCFG special register,
* whose bits n*4+3 .. n*4 index the list of page sizes for way n
* (XCHAL_xTLB_SETm_PAGESZ_LOG2_LIST for set m corresponding to way n);
* this list may be sparse for auto-refill ways because auto-refill
* ways have independent lists of supported page sizes sharing a
* common encoding with PTE entries; the encoding is the index into
* this list; unsupported sizes for a given way are zero in the list;
* selecting unsupported sizes results in undefined hardware behaviour;
* - is only possible for ways 0 thru 7 (due to ITLBCFG/DTLBCFG definition).
*/
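/*
 * Worked example of the ITLBCFG/DTLBCFG indexing described above (a hedged,
 * generic illustration -- on this configuration there is only one fixed-size
 * way, so these registers are not actually programmed):
 *
 *	// Field value selecting page-size index 'idx' (into
 *	// XCHAL_xTLB_SETm_PAGESZ_LOG2_LIST) for way 'n' (0..7);
 *	// bits n*4+3 .. n*4 of the CFG register hold the index:
 *	unsigned tlbcfg_field(unsigned idx, unsigned n)
 *	{
 *	    return (idx & 0xFu) << (n * 4);
 *	}
 */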
#define XCHAL_MMU_ASID_INVALID 0 /* ASID value indicating invalid address space */
#define XCHAL_MMU_ASID_KERNEL 0 /* ASID value indicating kernel (ring 0) address space */
#define XCHAL_MMU_SR_BITS 0 /* number of size-restriction bits supported */
#define XCHAL_MMU_CA_BITS 4 /* number of bits needed to hold cache attribute encoding */
#define XCHAL_MMU_MAX_PTE_PAGE_SIZE 29 /* max page size in a PTE structure (log2) */
#define XCHAL_MMU_MIN_PTE_PAGE_SIZE 29 /* min page size in a PTE structure (log2) */
/*** Instruction TLB: ***/
#define XCHAL_ITLB_WAY_BITS 0 /* number of bits holding the ways */
#define XCHAL_ITLB_WAYS 1 /* number of ways (n-way set-associative TLB) */
#define XCHAL_ITLB_ARF_WAYS 0 /* number of auto-refill ways */
#define XCHAL_ITLB_SETS 1 /* number of sets (groups of ways with identical settings) */
/* Way set to which each way belongs: */
#define XCHAL_ITLB_WAY0_SET 0
/* Way sets that are used by hardware auto-refill (ARF): */
#define XCHAL_ITLB_ARF_SETS 0 /* number of auto-refill sets */
/* Way sets that are "min-wired" (see terminology comment above): */
#define XCHAL_ITLB_MINWIRED_SETS 0 /* number of "min-wired" sets */
/* ITLB way set 0 (group of ways 0 thru 0): */
#define XCHAL_ITLB_SET0_WAY 0 /* index of first way in this way set */
#define XCHAL_ITLB_SET0_WAYS 1 /* number of (contiguous) ways in this way set */
#define XCHAL_ITLB_SET0_ENTRIES_LOG2 3 /* log2(number of entries in this way) */
#define XCHAL_ITLB_SET0_ENTRIES 8 /* number of entries in this way (always a power of 2) */
#define XCHAL_ITLB_SET0_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
#define XCHAL_ITLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */
#define XCHAL_ITLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */
#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MIN 29 /* log2(minimum supported page size) */
#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MAX 29 /* log2(maximum supported page size) */
#define XCHAL_ITLB_SET0_PAGESZ_LOG2_LIST 29 /* list of log2(page size)s, separated by XCHAL_SEP;
2^PAGESZ_BITS entries in list, unsupported entries are zero */
#define XCHAL_ITLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
#define XCHAL_ITLB_SET0_VPN_CONSTMASK 0x00000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
#define XCHAL_ITLB_SET0_PPN_CONSTMASK 0xE0000000 /* constant PPN bits, including entry index bits; 0 if all writable */
#define XCHAL_ITLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
#define XCHAL_ITLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET0_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_ITLB_SET0_CA_RESET 1 /* 1 if CA reset values defined (and all writable); 0 otherwise */
/* Constant VPN values for each entry of ITLB way set 0 (because VPN_CONSTMASK is non-zero): */
#define XCHAL_ITLB_SET0_E0_VPN_CONST 0x00000000
#define XCHAL_ITLB_SET0_E1_VPN_CONST 0x20000000
#define XCHAL_ITLB_SET0_E2_VPN_CONST 0x40000000
#define XCHAL_ITLB_SET0_E3_VPN_CONST 0x60000000
#define XCHAL_ITLB_SET0_E4_VPN_CONST 0x80000000
#define XCHAL_ITLB_SET0_E5_VPN_CONST 0xA0000000
#define XCHAL_ITLB_SET0_E6_VPN_CONST 0xC0000000
#define XCHAL_ITLB_SET0_E7_VPN_CONST 0xE0000000
/* Constant PPN values for each entry of ITLB way set 0 (because PPN_CONSTMASK is non-zero): */
#define XCHAL_ITLB_SET0_E0_PPN_CONST 0x00000000
#define XCHAL_ITLB_SET0_E1_PPN_CONST 0x20000000
#define XCHAL_ITLB_SET0_E2_PPN_CONST 0x40000000
#define XCHAL_ITLB_SET0_E3_PPN_CONST 0x60000000
#define XCHAL_ITLB_SET0_E4_PPN_CONST 0x80000000
#define XCHAL_ITLB_SET0_E5_PPN_CONST 0xA0000000
#define XCHAL_ITLB_SET0_E6_PPN_CONST 0xC0000000
#define XCHAL_ITLB_SET0_E7_PPN_CONST 0xE0000000
/* Reset CA values for each entry of ITLB way set 0 (because SET0_CA_RESET is non-zero): */
#define XCHAL_ITLB_SET0_E0_CA_RESET 0x02
#define XCHAL_ITLB_SET0_E1_CA_RESET 0x02
#define XCHAL_ITLB_SET0_E2_CA_RESET 0x02
#define XCHAL_ITLB_SET0_E3_CA_RESET 0x02
#define XCHAL_ITLB_SET0_E4_CA_RESET 0x02
#define XCHAL_ITLB_SET0_E5_CA_RESET 0x02
#define XCHAL_ITLB_SET0_E6_CA_RESET 0x02
#define XCHAL_ITLB_SET0_E7_CA_RESET 0x02
/*** Data TLB: ***/
#define XCHAL_DTLB_WAY_BITS 0 /* number of bits holding the ways */
#define XCHAL_DTLB_WAYS 1 /* number of ways (n-way set-associative TLB) */
#define XCHAL_DTLB_ARF_WAYS 0 /* number of auto-refill ways */
#define XCHAL_DTLB_SETS 1 /* number of sets (groups of ways with identical settings) */
/* Way set to which each way belongs: */
#define XCHAL_DTLB_WAY0_SET 0
/* Way sets that are used by hardware auto-refill (ARF): */
#define XCHAL_DTLB_ARF_SETS 0 /* number of auto-refill sets */
/* Way sets that are "min-wired" (see terminology comment above): */
#define XCHAL_DTLB_MINWIRED_SETS 0 /* number of "min-wired" sets */
/* DTLB way set 0 (group of ways 0 thru 0): */
#define XCHAL_DTLB_SET0_WAY 0 /* index of first way in this way set */
#define XCHAL_DTLB_SET0_WAYS 1 /* number of (contiguous) ways in this way set */
#define XCHAL_DTLB_SET0_ENTRIES_LOG2 3 /* log2(number of entries in this way) */
#define XCHAL_DTLB_SET0_ENTRIES 8 /* number of entries in this way (always a power of 2) */
#define XCHAL_DTLB_SET0_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
#define XCHAL_DTLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */
#define XCHAL_DTLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */
#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MIN 29 /* log2(minimum supported page size) */
#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MAX 29 /* log2(maximum supported page size) */
#define XCHAL_DTLB_SET0_PAGESZ_LOG2_LIST 29 /* list of log2(page size)s, separated by XCHAL_SEP;
2^PAGESZ_BITS entries in list, unsupported entries are zero */
#define XCHAL_DTLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
#define XCHAL_DTLB_SET0_VPN_CONSTMASK 0x00000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
#define XCHAL_DTLB_SET0_PPN_CONSTMASK 0xE0000000 /* constant PPN bits, including entry index bits; 0 if all writable */
#define XCHAL_DTLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
#define XCHAL_DTLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
#define XCHAL_DTLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_DTLB_SET0_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
#define XCHAL_DTLB_SET0_CA_RESET 1 /* 1 if CA reset values defined (and all writable); 0 otherwise */
/* Constant VPN values for each entry of DTLB way set 0 (because VPN_CONSTMASK is non-zero): */
#define XCHAL_DTLB_SET0_E0_VPN_CONST 0x00000000
#define XCHAL_DTLB_SET0_E1_VPN_CONST 0x20000000
#define XCHAL_DTLB_SET0_E2_VPN_CONST 0x40000000
#define XCHAL_DTLB_SET0_E3_VPN_CONST 0x60000000
#define XCHAL_DTLB_SET0_E4_VPN_CONST 0x80000000
#define XCHAL_DTLB_SET0_E5_VPN_CONST 0xA0000000
#define XCHAL_DTLB_SET0_E6_VPN_CONST 0xC0000000
#define XCHAL_DTLB_SET0_E7_VPN_CONST 0xE0000000
/* Constant PPN values for each entry of DTLB way set 0 (because PPN_CONSTMASK is non-zero): */
#define XCHAL_DTLB_SET0_E0_PPN_CONST 0x00000000
#define XCHAL_DTLB_SET0_E1_PPN_CONST 0x20000000
#define XCHAL_DTLB_SET0_E2_PPN_CONST 0x40000000
#define XCHAL_DTLB_SET0_E3_PPN_CONST 0x60000000
#define XCHAL_DTLB_SET0_E4_PPN_CONST 0x80000000
#define XCHAL_DTLB_SET0_E5_PPN_CONST 0xA0000000
#define XCHAL_DTLB_SET0_E6_PPN_CONST 0xC0000000
#define XCHAL_DTLB_SET0_E7_PPN_CONST 0xE0000000
/* Reset CA values for each entry of DTLB way set 0 (because SET0_CA_RESET is non-zero): */
#define XCHAL_DTLB_SET0_E0_CA_RESET 0x02
#define XCHAL_DTLB_SET0_E1_CA_RESET 0x02
#define XCHAL_DTLB_SET0_E2_CA_RESET 0x02
#define XCHAL_DTLB_SET0_E3_CA_RESET 0x02
#define XCHAL_DTLB_SET0_E4_CA_RESET 0x02
#define XCHAL_DTLB_SET0_E5_CA_RESET 0x02
#define XCHAL_DTLB_SET0_E6_CA_RESET 0x02
#define XCHAL_DTLB_SET0_E7_CA_RESET 0x02
#endif /*XTENSA_CONFIG_CORE_MATMAP_H*/

File diff suppressed because it is too large

View File

@ -0,0 +1,37 @@
/* Definitions for Xtensa instructions, types, and protos. */
/* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2003-2004 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/* NOTE: This file exists only for backward compatibility with T1050
and earlier Xtensa releases. It includes only a subset of the
available header files. */
#ifndef _XTENSA_BASE_HEADER
#define _XTENSA_BASE_HEADER
#ifdef __XTENSA__
#include <xtensa/tie/xt_core.h>
#include <xtensa/tie/xt_misc.h>
#endif /* __XTENSA__ */
#endif /* !_XTENSA_BASE_HEADER */

View File

@ -0,0 +1,80 @@
/*
* Xtensa Special Register symbolic names
*/
/* $Id: //depot/rel/Boreal/Xtensa/SWConfig/hal/specreg.h.tpp#2 $ */
/* Customer ID=7011; Build=0x2b6f6; Copyright (c) 1998-2002 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef XTENSA_SPECREG_H
#define XTENSA_SPECREG_H
/* Include these special register bitfield definitions, for historical reasons: */
#include <xtensa/corebits.h>
/* Special registers: */
#define SAR 3
#define LITBASE 5
#define IBREAKENABLE 96
#define DDR 104
#define IBREAKA_0 128
#define DBREAKA_0 144
#define DBREAKC_0 160
#define EPC_1 177
#define EPC_2 178
#define EPC_3 179
#define DEPC 192
#define EPS_2 194
#define EPS_3 195
#define EXCSAVE_1 209
#define EXCSAVE_2 210
#define EXCSAVE_3 211
#define INTERRUPT 226
#define INTENABLE 228
#define PS 230
#define VECBASE 231
#define EXCCAUSE 232
#define DEBUGCAUSE 233
#define CCOUNT 234
#define PRID 235
#define ICOUNT 236
#define ICOUNTLEVEL 237
#define EXCVADDR 238
#define CCOMPARE_0 240
/* Special cases (bases of special register series): */
#define IBREAKA 128
#define DBREAKA 144
#define DBREAKC 160
#define EPC 176
#define EPS 192
#define EXCSAVE 208
#define CCOMPARE 240
/* Special names for read-only and write-only interrupt registers: */
#define INTREAD 226
#define INTSET 226
#define INTCLEAR 227
#endif /* XTENSA_SPECREG_H */

View File

@ -0,0 +1,252 @@
/*
* xtensa/config/system.h -- HAL definitions that are dependent on SYSTEM configuration
*
* NOTE: The location and contents of this file are highly subject to change.
*
* Source for configuration-independent binaries (which link in a
* configuration-specific HAL library) must NEVER include this file.
* The HAL itself has historically included this file in some instances,
* but this is not appropriate either, because the HAL is meant to be
* core-specific but system independent.
*/
/* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2000-2007 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef XTENSA_CONFIG_SYSTEM_H
#define XTENSA_CONFIG_SYSTEM_H
/*#include <xtensa/hal.h>*/
/*----------------------------------------------------------------------
CONFIGURED SOFTWARE OPTIONS
----------------------------------------------------------------------*/
#define XSHAL_USE_ABSOLUTE_LITERALS 0 /* (sw-only option, whether software uses absolute literals) */
#define XSHAL_ABI XTHAL_ABI_CALL0 /* (sw-only option, selected ABI) */
/* The above maps to one of the following constants: */
#define XTHAL_ABI_WINDOWED 0
#define XTHAL_ABI_CALL0 1
/* Alternatives: */
/*#define XSHAL_WINDOWED_ABI 0*/ /* set if windowed ABI selected */
/*#define XSHAL_CALL0_ABI 1*/ /* set if call0 ABI selected */
#define XSHAL_CLIB XTHAL_CLIB_NEWLIB /* (sw-only option, selected C library) */
/* The above maps to one of the following constants: */
#define XTHAL_CLIB_NEWLIB 0
#define XTHAL_CLIB_UCLIBC 1
/* Alternatives: */
/*#define XSHAL_NEWLIB 1*/ /* set if newlib C library selected */
/*#define XSHAL_UCLIBC 0*/ /* set if uCLibC C library selected */
#define XSHAL_USE_FLOATING_POINT 1
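/*
 * These selections are plain integer macros, so configuration-dependent
 * software can test them at compile time; a minimal sketch (the name on the
 * left is illustrative, not part of this header):
 *
 *	#if XSHAL_ABI == XTHAL_ABI_CALL0
 *	#  define MY_RETURN_INSN	"ret"	// call0 ABI returns with RET
 *	#else
 *	#  define MY_RETURN_INSN	"retw"	// windowed ABI returns with RETW
 *	#endif
 */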
/*----------------------------------------------------------------------
DEVICE ADDRESSES
----------------------------------------------------------------------*/
/*
* Strange place to find these, but the configuration GUI
* allows moving these around to account for various core
* configurations. Specific boards (and their BSP software)
* will have specific meanings for these components.
*/
/* I/O Block areas: */
#define XSHAL_IOBLOCK_CACHED_VADDR 0x70000000
#define XSHAL_IOBLOCK_CACHED_PADDR 0x70000000
#define XSHAL_IOBLOCK_CACHED_SIZE 0x0E000000
#define XSHAL_IOBLOCK_BYPASS_VADDR 0x90000000
#define XSHAL_IOBLOCK_BYPASS_PADDR 0x90000000
#define XSHAL_IOBLOCK_BYPASS_SIZE 0x0E000000
/* System ROM: */
#define XSHAL_ROM_VADDR 0x50000000
#define XSHAL_ROM_PADDR 0x50000000
#define XSHAL_ROM_SIZE 0x01000000
/* Largest available area (free of vectors): */
#define XSHAL_ROM_AVAIL_VADDR 0x50000300
#define XSHAL_ROM_AVAIL_VSIZE 0x00FFFD00
/* System RAM: */
#define XSHAL_RAM_VADDR 0x60000000
#define XSHAL_RAM_PADDR 0x60000000
#define XSHAL_RAM_VSIZE 0x04000000
#define XSHAL_RAM_PSIZE 0x04000000
#define XSHAL_RAM_SIZE XSHAL_RAM_PSIZE
/* Largest available area (free of vectors): */
#define XSHAL_RAM_AVAIL_VADDR 0x60000000
#define XSHAL_RAM_AVAIL_VSIZE 0x04000000
/*
* Shadow system RAM (same device as system RAM, at different address).
* (Emulation boards need this for the SONIC Ethernet driver
* when data caches are configured for writeback mode.)
* NOTE: on full MMU configs, this points to the BYPASS virtual address
* of system RAM, ie. is the same as XSHAL_RAM_* except that virtual
* addresses are viewed through the BYPASS static map rather than
* the CACHED static map.
*/
#define XSHAL_RAM_BYPASS_VADDR 0xA0000000
#define XSHAL_RAM_BYPASS_PADDR 0xA0000000
#define XSHAL_RAM_BYPASS_PSIZE 0x04000000
/* Alternate system RAM (different device than system RAM): */
/*#define XSHAL_ALTRAM_[VP]ADDR ...not configured...*/
/*#define XSHAL_ALTRAM_SIZE ...not configured...*/
/* Some available location in which to place devices in a simulation (eg. XTMP): */
#define XSHAL_SIMIO_CACHED_VADDR 0xC0000000
#define XSHAL_SIMIO_BYPASS_VADDR 0xC0000000
#define XSHAL_SIMIO_PADDR 0xC0000000
#define XSHAL_SIMIO_SIZE 0x20000000
/*----------------------------------------------------------------------
* DEVICE-ADDRESS DEPENDENT...
*
* Values written to CACHEATTR special register (or its equivalent)
* to enable and disable caches in various modes.
*----------------------------------------------------------------------*/
/*----------------------------------------------------------------------
BACKWARD COMPATIBILITY ...
----------------------------------------------------------------------*/
/*
 * NOTE: the following two macros are DEPRECATED. Use the later
* board-specific macros instead, which are specially tuned for the
* particular target environments' memory maps.
*/
#define XSHAL_CACHEATTR_BYPASS XSHAL_XT2000_CACHEATTR_BYPASS /* disable caches in bypass mode */
#define XSHAL_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_DEFAULT /* default setting to enable caches (no writeback!) */
/*----------------------------------------------------------------------
GENERIC
----------------------------------------------------------------------*/
/* For the following, a 512MB region is used if it contains a system (PIF) RAM,
* system (PIF) ROM, local memory, or XLMI. */
/* These set any unused 512MB region to cache-BYPASS attribute: */
#define XSHAL_ALLVALID_CACHEATTR_WRITEBACK 0x22221112 /* enable caches in write-back mode */
#define XSHAL_ALLVALID_CACHEATTR_WRITEALLOC 0x22221112 /* enable caches in write-allocate mode */
#define XSHAL_ALLVALID_CACHEATTR_WRITETHRU 0x22221112 /* enable caches in write-through mode */
#define XSHAL_ALLVALID_CACHEATTR_BYPASS 0x22222222 /* disable caches in bypass mode */
#define XSHAL_ALLVALID_CACHEATTR_DEFAULT XSHAL_ALLVALID_CACHEATTR_WRITEBACK /* default setting to enable caches */
/* These set any unused 512MB region to ILLEGAL attribute: */
#define XSHAL_STRICT_CACHEATTR_WRITEBACK 0xFFFF111F /* enable caches in write-back mode */
#define XSHAL_STRICT_CACHEATTR_WRITEALLOC 0xFFFF111F /* enable caches in write-allocate mode */
#define XSHAL_STRICT_CACHEATTR_WRITETHRU 0xFFFF111F /* enable caches in write-through mode */
#define XSHAL_STRICT_CACHEATTR_BYPASS 0xFFFF222F /* disable caches in bypass mode */
#define XSHAL_STRICT_CACHEATTR_DEFAULT XSHAL_STRICT_CACHEATTR_WRITEBACK /* default setting to enable caches */
/* These set the first 512MB, if unused, to ILLEGAL attribute to help catch
* NULL-pointer dereference bugs; all other unused 512MB regions are set
* to cache-BYPASS attribute: */
#define XSHAL_TRAPNULL_CACHEATTR_WRITEBACK 0x2222111F /* enable caches in write-back mode */
#define XSHAL_TRAPNULL_CACHEATTR_WRITEALLOC 0x2222111F /* enable caches in write-allocate mode */
#define XSHAL_TRAPNULL_CACHEATTR_WRITETHRU 0x2222111F /* enable caches in write-through mode */
#define XSHAL_TRAPNULL_CACHEATTR_BYPASS 0x2222222F /* disable caches in bypass mode */
#define XSHAL_TRAPNULL_CACHEATTR_DEFAULT XSHAL_TRAPNULL_CACHEATTR_WRITEBACK /* default setting to enable caches */
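/*
 * Reading these values (hedged note): CACHEATTR holds eight 4-bit fields,
 * one per 512MB region, with bits 4n+3..4n describing the region that starts
 * at virtual address n * 0x20000000.  For example, in
 * XSHAL_TRAPNULL_CACHEATTR_BYPASS = 0x2222222F, region 0 (the first 512MB)
 * gets attribute 0xF (illegal, so NULL dereferences trap) and regions 1..7
 * get attribute 0x2 (bypass).  In C terms:
 *
 *	unsigned region_attr(unsigned cacheattr, unsigned n)
 *	{
 *	    return (cacheattr >> (n * 4)) & 0xF;	// attribute of region n
 *	}
 */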
/*----------------------------------------------------------------------
ISS (Instruction Set Simulator) SPECIFIC ...
----------------------------------------------------------------------*/
/* For now, ISS defaults to the TRAPNULL settings: */
#define XSHAL_ISS_CACHEATTR_WRITEBACK XSHAL_TRAPNULL_CACHEATTR_WRITEBACK
#define XSHAL_ISS_CACHEATTR_WRITEALLOC XSHAL_TRAPNULL_CACHEATTR_WRITEALLOC
#define XSHAL_ISS_CACHEATTR_WRITETHRU XSHAL_TRAPNULL_CACHEATTR_WRITETHRU
#define XSHAL_ISS_CACHEATTR_BYPASS XSHAL_TRAPNULL_CACHEATTR_BYPASS
#define XSHAL_ISS_CACHEATTR_DEFAULT XSHAL_TRAPNULL_CACHEATTR_WRITEBACK
#define XSHAL_ISS_PIPE_REGIONS 0
#define XSHAL_ISS_SDRAM_REGIONS 0
/*----------------------------------------------------------------------
XT2000 BOARD SPECIFIC ...
----------------------------------------------------------------------*/
/* For the following, a 512MB region is used if it contains any system RAM,
* system ROM, local memory, XLMI, or other XT2000 board device or memory.
* Regions containing devices are forced to cache-BYPASS mode regardless
* of whether the macro is _WRITEBACK vs. _BYPASS etc. */
/* These set any 512MB region unused on the XT2000 to ILLEGAL attribute: */
#define XSHAL_XT2000_CACHEATTR_WRITEBACK 0xFF22111F /* enable caches in write-back mode */
#define XSHAL_XT2000_CACHEATTR_WRITEALLOC 0xFF22111F /* enable caches in write-allocate mode */
#define XSHAL_XT2000_CACHEATTR_WRITETHRU 0xFF22111F /* enable caches in write-through mode */
#define XSHAL_XT2000_CACHEATTR_BYPASS 0xFF22222F /* disable caches in bypass mode */
#define XSHAL_XT2000_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_WRITEBACK /* default setting to enable caches */
#define XSHAL_XT2000_PIPE_REGIONS 0x00000000 /* BusInt pipeline regions */
#define XSHAL_XT2000_SDRAM_REGIONS 0x00000440 /* BusInt SDRAM regions */
/*----------------------------------------------------------------------
VECTOR INFO AND SIZES
----------------------------------------------------------------------*/
#define XSHAL_VECTORS_PACKED 0
#define XSHAL_STATIC_VECTOR_SELECT 0
#define XSHAL_RESET_VECTOR_VADDR 0x50000000
#define XSHAL_RESET_VECTOR_PADDR 0x50000000
/*
* Sizes allocated to vectors by the system (memory map) configuration.
* These sizes are constrained by core configuration (eg. one vector's
* code cannot overflow into another vector) but are dependent on the
* system or board (or LSP) memory map configuration.
*
* Whether or not each vector happens to be in a system ROM is also
* a system configuration matter, sometimes useful, included here also:
*/
#define XSHAL_RESET_VECTOR_SIZE 0x00000300
#define XSHAL_RESET_VECTOR_ISROM 1
#define XSHAL_USER_VECTOR_SIZE 0x0000001C
#define XSHAL_USER_VECTOR_ISROM 0
#define XSHAL_PROGRAMEXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_USEREXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_KERNEL_VECTOR_SIZE 0x0000001C
#define XSHAL_KERNEL_VECTOR_ISROM 0
#define XSHAL_STACKEDEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_KERNELEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_DOUBLEEXC_VECTOR_SIZE 0x00000010
#define XSHAL_DOUBLEEXC_VECTOR_ISROM 0
#define XSHAL_INTLEVEL2_VECTOR_SIZE 0x0000000C
#define XSHAL_INTLEVEL2_VECTOR_ISROM 0
#define XSHAL_DEBUG_VECTOR_SIZE XSHAL_INTLEVEL2_VECTOR_SIZE
#define XSHAL_DEBUG_VECTOR_ISROM XSHAL_INTLEVEL2_VECTOR_ISROM
#define XSHAL_NMI_VECTOR_SIZE 0x0000000C
#define XSHAL_NMI_VECTOR_ISROM 0
#define XSHAL_INTLEVEL3_VECTOR_SIZE XSHAL_NMI_VECTOR_SIZE
#endif /*XTENSA_CONFIG_SYSTEM_H*/

View File

@ -0,0 +1,77 @@
/*
* tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
*
* NOTE: This header file is not meant to be included directly.
*/
/* This header file contains assembly-language definitions (assembly
macros, etc.) for this specific Xtensa processor's TIE extensions
and options. It is customized to this Xtensa processor configuration.
Customer ID=7011; Build=0x2b6f6; Copyright (c) 1999-2010 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef _XTENSA_CORE_TIE_ASM_H
#define _XTENSA_CORE_TIE_ASM_H
/* Selection parameter values for save-area save/restore macros: */
/* Option vs. TIE: */
#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
/* Whether used automatically by compiler: */
#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
/* ABI handling across function calls: */
#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
/* Misc */
#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
/* Macro to save all non-coprocessor (extra) custom TIE and optional state
* (not including zero-overhead loop registers).
* Save area ptr (clobbered): ptr (1 byte aligned)
* Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
*/
.macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
xchal_sa_start \continue, \ofs
.endm // xchal_ncp_store
/* Macro to restore all non-coprocessor (extra) custom TIE and optional state
* (not including zero-overhead loop registers).
* Save area ptr (clobbered): ptr (1 byte aligned)
* Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
*/
.macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
xchal_sa_start \continue, \ofs
.endm // xchal_ncp_load
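/* Typical use (a hedged sketch): a context save/restore routine passes a
 * pointer to the NCP save area plus up to four scratch address registers:
 *
 *	xchal_ncp_store	a2, a3, a4, a5, a6	// a2 -> save area
 *	...
 *	xchal_ncp_load	a2, a3, a4, a5, a6	// restore from the same area
 *
 * On this configuration the non-coprocessor save area is empty, so both
 * macros expand to essentially nothing.
 */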
#define XCHAL_NCP_NUM_ATMPS 0
#define XCHAL_SA_NUM_ATMPS 0
#endif /*_XTENSA_CORE_TIE_ASM_H*/

View File

@ -0,0 +1,119 @@
/*
* tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
*
* NOTE: This header file is not meant to be included directly.
*/
/* This header file describes this specific Xtensa processor's TIE extensions
that extend basic Xtensa core functionality. It is customized to this
Xtensa processor configuration.
Customer ID=7011; Build=0x2b6f6; Copyright (c) 1999-2010 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef _XTENSA_CORE_TIE_H
#define _XTENSA_CORE_TIE_H
#define XCHAL_CP_NUM 0 /* number of coprocessors */
#define XCHAL_CP_MAX 0 /* max CP ID + 1 (0 if none) */
#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */
#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
/* Save area for non-coprocessor optional and custom (TIE) state: */
#define XCHAL_NCP_SA_SIZE 0
#define XCHAL_NCP_SA_ALIGN 1
/* Total save area for optional and custom state (NCP + CPn): */
#define XCHAL_TOTAL_SA_SIZE 0 /* with 16-byte align padding */
#define XCHAL_TOTAL_SA_ALIGN 1 /* actual minimum alignment */
/*
* Detailed contents of save areas.
* NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
* before expanding the XCHAL_xxx_SA_LIST() macros.
*
* XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
* dbnum,base,regnum,bitsz,gapsz,reset,x...)
*
* s = passed from XCHAL_*_LIST(s), eg. to select how to expand
* ccused = set if used by compiler without special options or code
* abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
* kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
* opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
* name = lowercase reg name (no quotes)
* galign = group byte alignment (power of 2) (galign >= align)
* align = register byte alignment (power of 2)
* asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
* (not including any pad bytes required to galign this or next reg)
* dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
* base = reg shortname w/o index (or sr=special, ur=TIE user reg)
* regnum = reg index in regfile, or special/TIE-user reg number
* bitsz = number of significant bits (regfile width, or ur/sr mask bits)
* gapsz = intervening bits, if bitsz bits not stored contiguously
* (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
* reset = register reset value (or 0 if undefined at reset)
* x = reserved for future use (0 until then)
*
* To filter out certain registers, e.g. to expand only the non-global
* registers used by the compiler, you can do something like this:
*
* #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
* #define SELCC0(p...)
* #define SELCC1(abikind,p...) SELAK##abikind(p)
* #define SELAK0(p...) REG(p)
* #define SELAK1(p...) REG(p)
* #define SELAK2(p...)
* #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
* ...what you want to expand...
*/
#define XCHAL_NCP_SA_NUM 0
#define XCHAL_NCP_SA_LIST(s) /* empty */
#define XCHAL_CP0_SA_NUM 0
#define XCHAL_CP0_SA_LIST(s) /* empty */
#define XCHAL_CP1_SA_NUM 0
#define XCHAL_CP1_SA_LIST(s) /* empty */
#define XCHAL_CP2_SA_NUM 0
#define XCHAL_CP2_SA_LIST(s) /* empty */
#define XCHAL_CP3_SA_NUM 0
#define XCHAL_CP3_SA_LIST(s) /* empty */
#define XCHAL_CP4_SA_NUM 0
#define XCHAL_CP4_SA_LIST(s) /* empty */
#define XCHAL_CP5_SA_NUM 0
#define XCHAL_CP5_SA_LIST(s) /* empty */
#define XCHAL_CP6_SA_NUM 0
#define XCHAL_CP6_SA_LIST(s) /* empty */
#define XCHAL_CP7_SA_NUM 0
#define XCHAL_CP7_SA_LIST(s) /* empty */
/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
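/*
 * Hedged decoding sketch: on little-endian configurations op0 is the low
 * nibble of the first instruction byte, so the byte length of the instruction
 * at 'p' can be looked up as:
 *
 *	static inline unsigned xt_insn_length(const unsigned char *p)
 *	{
 *	    static const unsigned char len[16] = { XCHAL_OP0_FORMAT_LENGTHS };
 *	    return len[p[0] & 0xF];	// op0 = low nibble of first byte (LE)
 *	}
 */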
#endif /*_XTENSA_CORE_TIE_H*/

View File

@ -0,0 +1,914 @@
/*
* xtensa/coreasm.h -- assembler-specific definitions that depend on CORE configuration
*
* Source for configuration-independent binaries (which link in a
* configuration-specific HAL library) must NEVER include this file.
* It is perfectly normal, however, for the HAL itself to include this file.
*
* This file must NOT include xtensa/config/system.h. Any assembler
* header file that depends on system information should likely go
* in a new systemasm.h (or sysasm.h) header file.
*
* NOTE: macro beqi32 is NOT configuration-dependent, and is placed
* here until we have a proper configuration-independent header file.
*/
/* $Id: //depot/rel/Boreal/Xtensa/OS/include/xtensa/coreasm.h#2 $ */
/*
* Copyright (c) 2000-2007 Tensilica Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef XTENSA_COREASM_H
#define XTENSA_COREASM_H
/*
* Tell header files this is assembly source, so they can avoid non-assembler
* definitions (eg. C types etc):
*/
#ifndef _ASMLANGUAGE /* conditionalize to avoid cpp warnings (3rd parties might use same macro) */
#define _ASMLANGUAGE
#endif
#include <xtensa/config/core.h>
#include <xtensa/config/specreg.h>
/*
* Assembly-language specific definitions (assembly macros, etc.).
*/
/*----------------------------------------------------------------------
* find_ms_setbit
*
* This macro finds the most significant bit that is set in <as>
 * and returns its index + <base> in <ad>, or <base> - 1 if <as> is zero.
* The index counts starting at zero for the lsbit, so the return
* value ranges from <base>-1 (no bit set) to <base>+31 (msbit set).
*
* Parameters:
* <ad> destination address register (any register)
* <as> source address register
* <at> temporary address register (must be different than <as>)
* <base> constant value added to result (usually 0 or 1)
* On entry:
* <ad> = undefined if different than <as>
* <as> = value whose most significant set bit is to be found
* <at> = undefined
* no other registers are used by this macro.
* On exit:
* <ad> = <base> + index of msbit set in original <as>,
* = <base> - 1 if original <as> was zero.
* <as> clobbered (if not <ad>)
* <at> clobbered (if not <ad>)
* Example:
* find_ms_setbit a0, a4, a0, 0 -- return in a0 index of msbit set in a4
*/
.macro find_ms_setbit ad, as, at, base
#if XCHAL_HAVE_NSA
movi \at, 31+\base
nsau \as, \as // get index of \as, numbered from msbit (32 if absent)
sub \ad, \at, \as // get numbering from lsbit (0..31, -1 if absent)
#else /* XCHAL_HAVE_NSA */
movi \at, \base // start with result of 0 (point to lsbit of 32)
beqz \as, 2f // special case for zero argument: return -1
bltui \as, 0x10000, 1f // is it one of the 16 lsbits? (if so, check lower 16 bits)
addi \at, \at, 16 // no, increment result to upper 16 bits (of 32)
//srli \as, \as, 16 // check upper half (shift right 16 bits)
extui \as, \as, 16, 16 // check upper half (shift right 16 bits)
1: bltui \as, 0x100, 1f // is it one of the 8 lsbits? (if so, check lower 8 bits)
addi \at, \at, 8 // no, increment result to upper 8 bits (of 16)
srli \as, \as, 8 // shift right to check upper 8 bits
1: bltui \as, 0x10, 1f // is it one of the 4 lsbits? (if so, check lower 4 bits)
addi \at, \at, 4 // no, increment result to upper 4 bits (of 8)
srli \as, \as, 4 // shift right 4 bits to check upper half
1: bltui \as, 0x4, 1f // is it one of the 2 lsbits? (if so, check lower 2 bits)
addi \at, \at, 2 // no, increment result to upper 2 bits (of 4)
srli \as, \as, 2 // shift right 2 bits to check upper half
1: bltui \as, 0x2, 1f // is it the lsbit?
addi \at, \at, 2 // no, increment result to upper bit (of 2)
2: addi \at, \at, -1 // (from just above: add 1; from beqz: return -1)
//srli \as, \as, 1
1: // done! \at contains index of msbit set (or -1 if none set)
.if 0x\ad - 0x\at // destination different than \at ? (works because regs are a0-a15)
mov \ad, \at // then move result to \ad
.endif
#endif /* XCHAL_HAVE_NSA */
.endm // find_ms_setbit
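/*
 * For reference, a C model of the result computed by find_ms_setbit
 * (a hedged sketch assuming a GCC-style __builtin_clz):
 *
 *	static inline int find_ms_setbit_c(unsigned as, int base)
 *	{
 *	    return as ? base + 31 - (int)__builtin_clz(as) : base - 1;
 *	}
 */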
/*----------------------------------------------------------------------
* find_ls_setbit
*
* This macro finds the least significant bit that is set in <as>,
 * and returns its index in <ad>.
* Usage is the same as for the find_ms_setbit macro.
* Example:
* find_ls_setbit a0, a4, a0, 0 -- return in a0 index of lsbit set in a4
*/
.macro find_ls_setbit ad, as, at, base
neg \at, \as // keep only the least-significant bit that is set...
and \as, \at, \as // ... in \as
find_ms_setbit \ad, \as, \at, \base
.endm // find_ls_setbit
/*----------------------------------------------------------------------
* find_ls_one
*
* Same as find_ls_setbit with base zero.
* Source (as) and destination (ad) registers must be different.
* Provided for backward compatibility.
*/
.macro find_ls_one ad, as
find_ls_setbit \ad, \as, \ad, 0
.endm // find_ls_one
/*----------------------------------------------------------------------
* floop, floopnez, floopgtz, floopend
*
* These macros are used for fast inner loops that
 * work whether or not the Loops option is configured.
* If the Loops option is configured, they simply use
* the zero-overhead LOOP instructions; otherwise
* they use explicit decrement and branch instructions.
*
* They are used in pairs, with floop, floopnez or floopgtz
* at the beginning of the loop, and floopend at the end.
*
* Each pair of loop macro calls must be given the loop count
* address register and a unique label for that loop.
*
* Example:
*
* movi a3, 16 // loop 16 times
* floop a3, myloop1
* :
* bnez a7, end1 // exit loop if a7 != 0
* :
* floopend a3, myloop1
* end1:
*
* Like the LOOP instructions, these macros cannot be
* nested, must include at least one instruction,
* cannot call functions inside the loop, etc.
* The loop can be exited by jumping to the instruction
* following floopend (or elsewhere outside the loop),
* or continued by jumping to a NOP instruction placed
* immediately before floopend.
*
* Unlike LOOP instructions, the register passed to floop*
* cannot be used inside the loop, because it is used as
* the loop counter if the Loops option is not configured.
* And its value is undefined after exiting the loop.
* And because the loop counter register is active inside
* the loop, you can't easily use this construct to loop
* across a register file using ROTW as you might with LOOP
* instructions, unless you copy the loop register along.
*/
/* Named label version of the macros: */
.macro floop ar, endlabel
floop_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
.endm
.macro floopnez ar, endlabel
floopnez_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
.endm
.macro floopgtz ar, endlabel
floopgtz_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
.endm
.macro floopend ar, endlabel
floopend_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
.endm
/* Numbered local label version of the macros: */
#if 0 /*UNTESTED*/
.macro floop89 ar
floop_ \ar, 8, 9f
.endm
.macro floopnez89 ar
floopnez_ \ar, 8, 9f
.endm
.macro floopgtz89 ar
floopgtz_ \ar, 8, 9f
.endm
.macro floopend89 ar
floopend_ \ar, 8b, 9
.endm
#endif /*0*/
/* Underlying version of the macros: */
.macro floop_ ar, startlabel, endlabelref
.ifdef _infloop_
.if _infloop_
.err // Error: floop cannot be nested
.endif
.endif
.set _infloop_, 1
#if XCHAL_HAVE_LOOPS
loop \ar, \endlabelref
#else /* XCHAL_HAVE_LOOPS */
\startlabel:
addi \ar, \ar, -1
#endif /* XCHAL_HAVE_LOOPS */
.endm // floop_
.macro floopnez_ ar, startlabel, endlabelref
.ifdef _infloop_
.if _infloop_
.err // Error: floopnez cannot be nested
.endif
.endif
.set _infloop_, 1
#if XCHAL_HAVE_LOOPS
loopnez \ar, \endlabelref
#else /* XCHAL_HAVE_LOOPS */
beqz \ar, \endlabelref
\startlabel:
addi \ar, \ar, -1
#endif /* XCHAL_HAVE_LOOPS */
.endm // floopnez_
.macro floopgtz_ ar, startlabel, endlabelref
.ifdef _infloop_
.if _infloop_
.err // Error: floopgtz cannot be nested
.endif
.endif
.set _infloop_, 1
#if XCHAL_HAVE_LOOPS
loopgtz \ar, \endlabelref
#else /* XCHAL_HAVE_LOOPS */
bltz \ar, \endlabelref
beqz \ar, \endlabelref
\startlabel:
addi \ar, \ar, -1
#endif /* XCHAL_HAVE_LOOPS */
.endm // floopgtz_
.macro floopend_ ar, startlabelref, endlabel
.ifndef _infloop_
.err // Error: floopend without matching floopXXX
.endif
.ifeq _infloop_
.err // Error: floopend without matching floopXXX
.endif
.set _infloop_, 0
#if ! XCHAL_HAVE_LOOPS
bnez \ar, \startlabelref
#endif /* XCHAL_HAVE_LOOPS */
\endlabel:
.endm // floopend_
/*----------------------------------------------------------------------
* crsil -- conditional RSIL (read/set interrupt level)
*
* Executes the RSIL instruction if it exists, else just reads PS.
* The RSIL instruction does not exist in the new exception architecture
* if the interrupt option is not selected.
*/
.macro crsil ar, newlevel
#if XCHAL_HAVE_OLD_EXC_ARCH || XCHAL_HAVE_INTERRUPTS
rsil \ar, \newlevel
#else
rsr \ar, PS
#endif
.endm // crsil
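/* Example (hedged): raise the interrupt level to 15 while capturing the old
 * PS in a2, or just read PS when RSIL is not available:
 *
 *	crsil	a2, 15
 */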
/*----------------------------------------------------------------------
* safe_movi_a0 -- move constant into a0 when L32R is not safe
*
* This macro is typically used by interrupt/exception handlers.
* Loads a 32-bit constant in a0, without using any other register,
* and without corrupting the LITBASE register, even when the
* value of the LITBASE register is unknown (eg. when application
* code and interrupt/exception handling code are built independently,
* and thus with independent values of the LITBASE register;
* debug monitors are one example of this).
*
* Worst-case size of resulting code: 17 bytes.
*/
.macro safe_movi_a0 constant
#if XCHAL_HAVE_ABSOLUTE_LITERALS
/* Contort a PC-relative literal load even though we may be in litbase-relative mode: */
j 1f
.begin no-transform // ensure what follows is assembled exactly as-is
.align 4 // ensure constant and call0 target ...
.byte 0 // ... are 4-byte aligned (call0 instruction is 3 bytes long)
1: call0 2f // read PC (that follows call0) in a0
.long \constant // 32-bit constant to load into a0
2:
.end no-transform
l32i a0, a0, 0 // load constant
#else
movi a0, \constant // no LITBASE, can assume PC-relative L32R
#endif
.endm
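/* Example (hedged; the constant is a hypothetical handler-table address):
 *
 *	safe_movi_a0	0x40000400	// a0 <- constant, no other regs touched
 */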
/*----------------------------------------------------------------------
* window_spill{4,8,12}
*
* These macros spill callers' register windows to the stack.
* They work for both privileged and non-privileged tasks.
* Must be called from a windowed ABI context, eg. within
* a windowed ABI function (ie. valid stack frame, window
* exceptions enabled, not in exception mode, etc).
*
* This macro requires a single invocation of the window_spill_common
* macro in the same assembly unit and section.
*
* Note that using window_spill{4,8,12} macros is more efficient
* than calling a function implemented using window_spill_function,
* because the latter needs extra code to figure out the size of
* the call to the spilling function.
*
* Example usage:
*
* .text
* .align 4
* .global some_function
* .type some_function,@function
* some_function:
* entry a1, 16
* :
* :
*
* window_spill4 // Spill windows of some_function's callers; preserves a0..a3 only;
* // to use window_spill{8,12} in this example function we'd have
* // to increase space allocated by the entry instruction, because
* // 16 bytes only allows call4; 32 or 48 bytes (+locals) are needed
* // for call8/window_spill8 or call12/window_spill12 respectively.
*
* :
*
* retw
*
* window_spill_common // instantiates code used by window_spill4
*
*
* On entry:
* none (if window_spill4)
* stack frame has enough space allocated for call8 (if window_spill8)
* stack frame has enough space allocated for call12 (if window_spill12)
* On exit:
* a4..a15 clobbered (if window_spill4)
* a8..a15 clobbered (if window_spill8)
* a12..a15 clobbered (if window_spill12)
* no caller windows are in live registers
*/
.macro window_spill4
#if XCHAL_HAVE_WINDOWED
# if XCHAL_NUM_AREGS == 16
movi a15, 0 // for 16-register files, no need to call to reach the end
# elif XCHAL_NUM_AREGS == 32
call4 .L__wdwspill_assist28 // call deep enough to clear out any live callers
# elif XCHAL_NUM_AREGS == 64
call4 .L__wdwspill_assist60 // call deep enough to clear out any live callers
# endif
#endif
.endm // window_spill4
.macro window_spill8
#if XCHAL_HAVE_WINDOWED
# if XCHAL_NUM_AREGS == 16
movi a15, 0 // for 16-register files, no need to call to reach the end
# elif XCHAL_NUM_AREGS == 32
call8 .L__wdwspill_assist24 // call deep enough to clear out any live callers
# elif XCHAL_NUM_AREGS == 64
call8 .L__wdwspill_assist56 // call deep enough to clear out any live callers
# endif
#endif
.endm // window_spill8
.macro window_spill12
#if XCHAL_HAVE_WINDOWED
# if XCHAL_NUM_AREGS == 16
movi a15, 0 // for 16-register files, no need to call to reach the end
# elif XCHAL_NUM_AREGS == 32
call12 .L__wdwspill_assist20 // call deep enough to clear out any live callers
# elif XCHAL_NUM_AREGS == 64
call12 .L__wdwspill_assist52 // call deep enough to clear out any live callers
# endif
#endif
.endm // window_spill12
/*----------------------------------------------------------------------
* window_spill_function
*
* This macro outputs a function that will spill its caller's callers'
* register windows to the stack. Eg. it could be used to implement
* a version of xthal_window_spill() that works in non-privileged tasks.
* This works for both privileged and non-privileged tasks.
*
* Typical usage:
*
* .text
* .align 4
* .global my_spill_function
* .type my_spill_function,@function
* my_spill_function:
* window_spill_function
*
* On entry to resulting function:
* none
* On exit from resulting function:
* none (no caller windows are in live registers)
*/
.macro window_spill_function
#if XCHAL_HAVE_WINDOWED
# if XCHAL_NUM_AREGS == 32
entry sp, 48
bbci.l a0, 31, 1f // branch if called with call4
bbsi.l a0, 30, 2f // branch if called with call12
call8 .L__wdwspill_assist16 // called with call8, only need another 8
retw
1: call12 .L__wdwspill_assist16 // called with call4, only need another 12
retw
2: call4 .L__wdwspill_assist16 // called with call12, only need another 4
retw
# elif XCHAL_NUM_AREGS == 64
entry sp, 48
bbci.l a0, 31, 1f // branch if called with call4
bbsi.l a0, 30, 2f // branch if called with call12
call4 .L__wdwspill_assist52 // called with call8, only need a call4
retw
1: call8 .L__wdwspill_assist52 // called with call4, only need a call8
retw
2: call12 .L__wdwspill_assist40 // called with call12, can skip a call12
retw
# elif XCHAL_NUM_AREGS == 16
entry sp, 16
bbci.l a0, 31, 1f // branch if called with call4
bbsi.l a0, 30, 2f // branch if called with call12
movi a7, 0 // called with call8
retw
1: movi a11, 0 // called with call4
2: retw // if called with call12, everything already spilled
// movi a15, 0 // trick to spill all but the direct caller
// j 1f
// // The entry instruction is magical in the assembler (gets auto-aligned)
// // so we have to jump to it to avoid falling through the padding.
// // We need entry/retw to know where to return.
//1: entry sp, 16
// retw
# else
# error "unrecognized address register file size"
# endif
#endif /* XCHAL_HAVE_WINDOWED */
window_spill_common
.endm // window_spill_function
/*----------------------------------------------------------------------
* window_spill_common
*
* Common code used by any number of invocations of the window_spill##
* and window_spill_function macros.
*
* Must be instantiated exactly once within a given assembly unit,
* within call/j range of and same section as window_spill##
* macro invocations for that assembly unit.
* (Is automatically instantiated by the window_spill_function macro.)
*/
.macro window_spill_common
#if XCHAL_HAVE_WINDOWED && (XCHAL_NUM_AREGS == 32 || XCHAL_NUM_AREGS == 64)
.ifndef .L__wdwspill_defined
# if XCHAL_NUM_AREGS >= 64
.L__wdwspill_assist60:
entry sp, 32
call8 .L__wdwspill_assist52
retw
.L__wdwspill_assist56:
entry sp, 16
call4 .L__wdwspill_assist52
retw
.L__wdwspill_assist52:
entry sp, 48
call12 .L__wdwspill_assist40
retw
.L__wdwspill_assist40:
entry sp, 48
call12 .L__wdwspill_assist28
retw
# endif
.L__wdwspill_assist28:
entry sp, 48
call12 .L__wdwspill_assist16
retw
.L__wdwspill_assist24:
entry sp, 32
call8 .L__wdwspill_assist16
retw
.L__wdwspill_assist20:
entry sp, 16
call4 .L__wdwspill_assist16
retw
.L__wdwspill_assist16:
entry sp, 16
movi a15, 0
retw
.set .L__wdwspill_defined, 1
.endif
#endif /* XCHAL_HAVE_WINDOWED with 32 or 64 aregs */
.endm // window_spill_common
/*----------------------------------------------------------------------
* beqi32
*
 * This macro implements a version of beqi for an arbitrary 32-bit immediate value.
 *
 *	beqi32	ax, ay, imm32, label
 *
 * Compares the value in register ax with the 32-bit immediate and jumps to
 * label if they are equal.  Clobbers register ay if needed.
*
*/
.macro beqi32 ax, ay, imm, label
.ifeq ((\imm-1) & ~7) // 1..8 ?
beqi \ax, \imm, \label
.else
.ifeq (\imm+1) // -1 ?
beqi \ax, \imm, \label
.else
.ifeq (\imm) // 0 ?
beqz \ax, \label
.else
// We could also handle immediates 10,12,16,32,64,128,256
// but it would be a long macro...
movi \ay, \imm
beq \ax, \ay, \label
.endif
.endif
.endif
.endm // beqi32
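/* Example (hedged; the label name is illustrative):
 *
 *	beqi32	a2, a3, 0x12345678, .Lmatch	// a3 may be clobbered
 */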
/*----------------------------------------------------------------------
* isync_retw_nop
*
* This macro must be invoked immediately after ISYNC if ISYNC
* would otherwise be immediately followed by RETW (or other instruction
* modifying WindowBase or WindowStart), in a context where
* kernel vector mode may be selected, and level-one interrupts
* and window overflows may be enabled, on an XEA1 configuration.
*
* On hardware with erratum "XEA1KWIN" (see <xtensa/core.h> for details),
* XEA1 code must have at least one instruction between ISYNC and RETW if
* run in kernel vector mode with interrupts and window overflows enabled.
*/
.macro isync_retw_nop
#if XCHAL_MAYHAVE_ERRATUM_XEA1KWIN
nop
#endif
.endm
/*----------------------------------------------------------------------
* abs
*
* implements abs on machines that do not have it configured
*/
#if !XCHAL_HAVE_ABS
.macro abs arr, ars
.ifc \arr, \ars
//src equal dest is less efficient
bgez \arr, 1f
neg \arr, \arr
1:
.else
neg \arr, \ars
movgez \arr, \ars, \ars
.endif
.endm
#endif /* !XCHAL_HAVE_ABS */
/*----------------------------------------------------------------------
* addx2
*
* implements addx2 on machines that do not have it configured
*
*/
#if !XCHAL_HAVE_ADDX
.macro addx2 arr, ars, art
.ifc \arr, \art
.ifc \arr, \ars
// addx2 a, a, a (not common)
.err
.else
add \arr, \ars, \art
add \arr, \ars, \art
.endif
.else
//addx2 a, b, c
//addx2 a, a, b
//addx2 a, b, b
slli \arr, \ars, 1
add \arr, \arr, \art
.endif
.endm
#endif /* !XCHAL_HAVE_ADDX */
/*----------------------------------------------------------------------
* addx4
*
* implements addx4 on machines that do not have it configured
*
*/
#if !XCHAL_HAVE_ADDX
.macro addx4 arr, ars, art
.ifc \arr, \art
.ifc \arr, \ars
// addx4 a, a, a (not common)
.err
.else
// addx4 a, b, a
add \arr, \ars, \art
add \arr, \ars, \art
add \arr, \ars, \art
add \arr, \ars, \art
.endif
.else
//addx4 a, b, c
//addx4 a, a, b
//addx4 a, b, b
slli \arr, \ars, 2
add \arr, \arr, \art
.endif
.endm
#endif /* !XCHAL_HAVE_ADDX */
/*----------------------------------------------------------------------
* addx8
*
* implements addx8 on machines that do not have it configured
*
*/
#if !XCHAL_HAVE_ADDX
.macro addx8 arr, ars, art
.ifc \arr, \art
.ifc \arr, \ars
//addx8 a, a, a (not common)
.err
.else
//addx8 a, b, a
add \arr, \ars, \art
add \arr, \ars, \art
add \arr, \ars, \art
add \arr, \ars, \art
add \arr, \ars, \art
add \arr, \ars, \art
add \arr, \ars, \art
add \arr, \ars, \art
.endif
.else
//addx8 a, b, c
//addx8 a, a, b
//addx8 a, b, b
slli \arr, \ars, 3
add \arr, \arr, \art
.endif
.endm
#endif /* !XCHAL_HAVE_ADDX */
/*----------------------------------------------------------------------
* rfe_rfue
*
* Maps to RFUE on XEA1, and RFE on XEA2. No mapping on XEAX.
*/
#if XCHAL_HAVE_XEA1
.macro rfe_rfue
rfue
.endm
#elif XCHAL_HAVE_XEA2
.macro rfe_rfue
rfe
.endm
#endif
/*----------------------------------------------------------------------
* abi_entry
*
* Generate proper function entry sequence for the current ABI
* (windowed or call0). Takes care of allocating stack space (up to 1kB)
* and saving the return PC, if necessary. The corresponding abi_return
* macro does the corresponding stack deallocation and restoring return PC.
*
* Parameters are:
*
* locsize Number of bytes to allocate on the stack
* for local variables (and for args to pass to
* callees, if any calls are made). Defaults to zero.
* The macro rounds this up to a multiple of 16.
* NOTE: large values are allowed (e.g. up to 1 GB).
*
* callsize Maximum call size made by this function.
* Leave zero (default) for leaf functions, i.e. if
* this function makes no calls to other functions.
* Otherwise must be set to 4, 8, or 12 according
* to whether the "largest" call made is a call[x]4,
* call[x]8, or call[x]12 (for call0 ABI, it makes
* no difference whether this is set to 4, 8 or 12,
* but it must be set to one of these values).
*
* NOTE: It is up to the caller to align the entry point, declare the
* function symbol, make it global, etc.
*
* NOTE: This macro relies on assembler relaxation for large values
* of locsize. It might not work with the no-transform directive.
* NOTE: For the call0 ABI, this macro ensures SP is allocated or
* de-allocated cleanly, i.e. without temporarily allocating too much
* (or allocating negatively!) due to addi relaxation.
*
* NOTE: Generating the proper sequence and register allocation for
* making calls in an ABI independent manner is a separate topic not
* covered by this macro.
*
* NOTE: To access arguments, you can't use a fixed offset from SP.
* The offset depends on the ABI, whether the function is leaf, etc.
* The simplest method is probably to use the .locsz symbol, which
* is set by this macro to the actual number of bytes allocated on
* the stack, in other words, to the offset from SP to the arguments.
* E.g. for a function whose arguments are all 32-bit integers, you
* can get the 7th and 8th arguments (1st and 2nd args stored on stack)
* using:
* l32i a2, sp, .locsz
* l32i a3, sp, .locsz+4
* (this example works as long as locsize is under L32I's offset limit
* of 1020 minus up to 48 bytes of ABI-specific stack usage;
* otherwise you might first need to do "addi a?, sp, .locsz"
* or similar sequence).
*
* NOTE: For call0 ABI, this macro (and abi_return) may clobber a9
* (a caller-saved register).
*
* Examples:
* abi_entry
* abi_entry 5
* abi_entry 22, 8
* abi_entry 0, 4
*/
/*
* Compute .locsz and .callsz without emitting any instructions.
* Used by both abi_entry and abi_return.
* Assumes locsize >= 0.
*/
.macro abi_entry_size locsize=0, callsize=0
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
.ifeq \callsize
.set .callsz, 16
.else
.ifeq \callsize-4
.set .callsz, 16
.else
.ifeq \callsize-8
.set .callsz, 32
.else
.ifeq \callsize-12
.set .callsz, 48
.else
.error "abi_entry: invalid call size \callsize"
.endif
.endif
.endif
.endif
.set .locsz, .callsz + ((\locsize + 15) & -16)
#else
.set .callsz, \callsize
.if .callsz /* if calls, need space for return PC */
.set .locsz, (\locsize + 4 + 15) & -16
.else
.set .locsz, (\locsize + 15) & -16
.endif
#endif
.endm
.macro abi_entry locsize=0, callsize=0
.iflt \locsize
.error "abi_entry: invalid negative size of locals (\locsize)"
.endif
abi_entry_size \locsize, \callsize
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
.ifgt .locsz - 32760 /* .locsz > 32760 (ENTRY's max range)? */
/* Funky computation to try to have assembler use addmi efficiently if possible: */
entry sp, 0x7F00 + (.locsz & 0xF0)
addi a12, sp, - ((.locsz & -0x100) - 0x7F00)
movsp sp, a12
.else
entry sp, .locsz
.endif
#else
.if .locsz
.ifle .locsz - 128 /* if locsz <= 128 */
addi sp, sp, -.locsz
.if .callsz
s32i a0, sp, .locsz - 4
.endif
.elseif .callsz /* locsz > 128, with calls: */
movi a9, .locsz - 16 /* note: a9 is caller-saved */
addi sp, sp, -16
s32i a0, sp, 12
sub sp, sp, a9
.else /* locsz > 128, no calls: */
movi a9, .locsz
sub sp, sp, a9
.endif /* end */
.endif
#endif
.endm
/*----------------------------------------------------------------------
* abi_return
*
* Generate proper function exit sequence for the current ABI
* (windowed or call0). Takes care of freeing stack space and
* restoring the return PC, if necessary.
* NOTE: This macro MUST be invoked following a corresponding
* abi_entry macro invocation. For call0 ABI in particular,
* all stack and PC restoration are done according to the last
* abi_entry macro invoked before this macro in the assembly file.
*
 * Normally this macro takes no arguments. However, to allow
* for placing abi_return *before* abi_entry (as must be done
* for some highly optimized assembly), it optionally takes
* exactly the same arguments as abi_entry.
*/
.macro abi_return locsize=-1, callsize=0
.ifge \locsize
abi_entry_size \locsize, \callsize
.endif
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
retw
#else
.if .locsz
.iflt .locsz - 128 /* if locsz < 128 */
.if .callsz
l32i a0, sp, .locsz - 4
.endif
addi sp, sp, .locsz
.elseif .callsz /* locsz >= 128, with calls: */
addi a9, sp, .locsz - 16
l32i a0, a9, 12
addi sp, a9, 16
.else /* locsz >= 128, no calls: */
movi a9, .locsz
add sp, sp, a9
.endif /* end */
.endif
ret
#endif
.endm
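/*
 * Illustrative ABI-independent function skeleton (not part of the original
 * header; the function name is hypothetical). It allocates 16 bytes of
 * locals and declares that the largest call it makes is call[x]8-sized:
 *
 *	.text
 *	.align	4
 *	.global	my_func
 *	my_func:
 *		abi_entry	16, 8
 *		// ... function body (may use call8/callx8) ...
 *		abi_return
 */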
#endif /*XTENSA_COREASM_H*/

View File

@ -0,0 +1,164 @@
/*
* xtensa/corebits.h - Xtensa Special Register field positions, masks, values.
*
* (In previous releases, these were defined in specreg.h, a generated file.
* This file is not generated, ie. it is processor configuration independent.)
*/
/* $Id: //depot/rel/Boreal/Xtensa/OS/include/xtensa/corebits.h#2 $ */
/*
* Copyright (c) 2005-2007 Tensilica Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef XTENSA_COREBITS_H
#define XTENSA_COREBITS_H
/* EXCCAUSE register fields: */
#define EXCCAUSE_EXCCAUSE_SHIFT 0
#define EXCCAUSE_EXCCAUSE_MASK 0x3F
/* EXCCAUSE register values: */
/*
* General Exception Causes
* (values of EXCCAUSE special register set by general exceptions,
* which vector to the user, kernel, or double-exception vectors).
*/
#define EXCCAUSE_ILLEGAL 0 /* Illegal Instruction */
#define EXCCAUSE_SYSCALL 1 /* System Call (SYSCALL instruction) */
#define EXCCAUSE_INSTR_ERROR 2 /* Instruction Fetch Error */
# define EXCCAUSE_IFETCHERROR 2 /* (backward compatibility macro, deprecated, avoid) */
#define EXCCAUSE_LOAD_STORE_ERROR 3 /* Load Store Error */
# define EXCCAUSE_LOADSTOREERROR 3 /* (backward compatibility macro, deprecated, avoid) */
#define EXCCAUSE_LEVEL1_INTERRUPT 4 /* Level 1 Interrupt */
# define EXCCAUSE_LEVEL1INTERRUPT 4 /* (backward compatibility macro, deprecated, avoid) */
#define EXCCAUSE_ALLOCA 5 /* Stack Extension Assist (MOVSP instruction) for alloca */
#define EXCCAUSE_DIVIDE_BY_ZERO 6 /* Integer Divide by Zero */
#define EXCCAUSE_SPECULATION 7 /* Use of Failed Speculative Access (not implemented) */
#define EXCCAUSE_PRIVILEGED 8 /* Privileged Instruction */
#define EXCCAUSE_UNALIGNED 9 /* Unaligned Load or Store */
/* Reserved 10..11 */
#define EXCCAUSE_INSTR_DATA_ERROR 12 /* PIF Data Error on Instruction Fetch (RB-200x and later) */
#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13 /* PIF Data Error on Load or Store (RB-200x and later) */
#define EXCCAUSE_INSTR_ADDR_ERROR 14 /* PIF Address Error on Instruction Fetch (RB-200x and later) */
#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15 /* PIF Address Error on Load or Store (RB-200x and later) */
#define EXCCAUSE_ITLB_MISS 16 /* ITLB Miss (no ITLB entry matches, hw refill also missed) */
#define EXCCAUSE_ITLB_MULTIHIT 17 /* ITLB Multihit (multiple ITLB entries match) */
#define EXCCAUSE_INSTR_RING 18 /* Ring Privilege Violation on Instruction Fetch */
/* Reserved 19 */ /* Size Restriction on IFetch (not implemented) */
#define EXCCAUSE_INSTR_PROHIBITED 20 /* Cache Attribute does not allow Instruction Fetch */
/* Reserved 21..23 */
#define EXCCAUSE_DTLB_MISS 24 /* DTLB Miss (no DTLB entry matches, hw refill also missed) */
#define EXCCAUSE_DTLB_MULTIHIT 25 /* DTLB Multihit (multiple DTLB entries match) */
#define EXCCAUSE_LOAD_STORE_RING 26 /* Ring Privilege Violation on Load or Store */
/* Reserved 27 */ /* Size Restriction on Load/Store (not implemented) */
#define EXCCAUSE_LOAD_PROHIBITED 28 /* Cache Attribute does not allow Load */
#define EXCCAUSE_STORE_PROHIBITED 29 /* Cache Attribute does not allow Store */
/* Reserved 30..31 */
#define EXCCAUSE_CP_DISABLED(n) (32+(n)) /* Access to Coprocessor 'n' when disabled */
#define EXCCAUSE_CP0_DISABLED 32 /* Access to Coprocessor 0 when disabled */
#define EXCCAUSE_CP1_DISABLED 33 /* Access to Coprocessor 1 when disabled */
#define EXCCAUSE_CP2_DISABLED 34 /* Access to Coprocessor 2 when disabled */
#define EXCCAUSE_CP3_DISABLED 35 /* Access to Coprocessor 3 when disabled */
#define EXCCAUSE_CP4_DISABLED 36 /* Access to Coprocessor 4 when disabled */
#define EXCCAUSE_CP5_DISABLED 37 /* Access to Coprocessor 5 when disabled */
#define EXCCAUSE_CP6_DISABLED 38 /* Access to Coprocessor 6 when disabled */
#define EXCCAUSE_CP7_DISABLED 39 /* Access to Coprocessor 7 when disabled */
/*#define EXCCAUSE_FLOATING_POINT 40*/ /* Floating Point Exception (not implemented) */
/* Reserved 40..63 */
/* PS register fields: */
#define PS_WOE_SHIFT 18
#define PS_WOE_MASK 0x00040000
#define PS_WOE PS_WOE_MASK
#define PS_CALLINC_SHIFT 16
#define PS_CALLINC_MASK 0x00030000
#define PS_CALLINC(n) (((n)&3)<<PS_CALLINC_SHIFT) /* n = 0..3 */
#define PS_OWB_SHIFT 8
#define PS_OWB_MASK 0x00000F00
#define PS_OWB(n) (((n)&15)<<PS_OWB_SHIFT) /* n = 0..15 (or 0..7) */
#define PS_RING_SHIFT 6
#define PS_RING_MASK 0x000000C0
#define PS_RING(n) (((n)&3)<<PS_RING_SHIFT) /* n = 0..3 */
#define PS_UM_SHIFT 5
#define PS_UM_MASK 0x00000020
#define PS_UM PS_UM_MASK
#define PS_EXCM_SHIFT 4
#define PS_EXCM_MASK 0x00000010
#define PS_EXCM PS_EXCM_MASK
#define PS_INTLEVEL_SHIFT 0
#define PS_INTLEVEL_MASK 0x0000000F
#define PS_INTLEVEL(n) ((n)&PS_INTLEVEL_MASK) /* n = 0..15 */
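/*
 * Example (illustrative only): a typical PS value for running windowed
 * application code at interrupt level 0 might be composed from the fields
 * above as (PS_WOE | PS_UM | PS_RING(0) | PS_INTLEVEL(0)), while masking
 * the interrupt level out of a saved PS image is (ps & ~PS_INTLEVEL_MASK).
 */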
/* Backward compatibility (deprecated): */
#define PS_PROGSTACK_SHIFT PS_UM_SHIFT
#define PS_PROGSTACK_MASK PS_UM_MASK
#define PS_PROG_SHIFT PS_UM_SHIFT
#define PS_PROG_MASK PS_UM_MASK
#define PS_PROG PS_UM
/* DBREAKCn register fields: */
#define DBREAKC_MASK_SHIFT 0
#define DBREAKC_MASK_MASK 0x0000003F
#define DBREAKC_LOADBREAK_SHIFT 30
#define DBREAKC_LOADBREAK_MASK 0x40000000
#define DBREAKC_STOREBREAK_SHIFT 31
#define DBREAKC_STOREBREAK_MASK 0x80000000
/* DEBUGCAUSE register fields: */
#define DEBUGCAUSE_DEBUGINT_SHIFT 5
#define DEBUGCAUSE_DEBUGINT_MASK 0x20 /* debug interrupt */
#define DEBUGCAUSE_BREAKN_SHIFT 4
#define DEBUGCAUSE_BREAKN_MASK 0x10 /* BREAK.N instruction */
#define DEBUGCAUSE_BREAK_SHIFT 3
#define DEBUGCAUSE_BREAK_MASK 0x08 /* BREAK instruction */
#define DEBUGCAUSE_DBREAK_SHIFT 2
#define DEBUGCAUSE_DBREAK_MASK 0x04 /* DBREAK match */
#define DEBUGCAUSE_IBREAK_SHIFT 1
#define DEBUGCAUSE_IBREAK_MASK 0x02 /* IBREAK match */
#define DEBUGCAUSE_ICOUNT_SHIFT 0
#define DEBUGCAUSE_ICOUNT_MASK 0x01 /* ICOUNT would increment to zero */
/* MESR register fields: */
#define MESR_MEME 0x00000001 /* memory error */
#define MESR_MEME_SHIFT 0
#define MESR_DME 0x00000002 /* double memory error */
#define MESR_DME_SHIFT 1
#define MESR_RCE 0x00000010 /* recorded memory error */
#define MESR_RCE_SHIFT 4
#define MESR_LCE
#define MESR_LCE_SHIFT ?
#define MESR_LCE_L
#define MESR_ERRENAB 0x00000100
#define MESR_ERRENAB_SHIFT 8
#define MESR_ERRTEST 0x00000200
#define MESR_ERRTEST_SHIFT 9
#define MESR_DATEXC 0x00000400
#define MESR_DATEXC_SHIFT 10
#define MESR_INSEXC 0x00000800
#define MESR_INSEXC_SHIFT 11
#define MESR_WAYNUM_SHIFT 16
#define MESR_ACCTYPE_SHIFT 20
#define MESR_MEMTYPE_SHIFT 24
#define MESR_ERRTYPE_SHIFT 30
#endif /*XTENSA_COREBITS_H*/

View File

@ -0,0 +1,925 @@
/*
xtensa/hal.h -- contains a definition of the Core HAL interface
All definitions in this header file are independent of any specific
Xtensa processor configuration. Thus software (eg. OS, application,
etc) can include this header file and be compiled into configuration-
independent objects that can be distributed and eventually linked
to the HAL library (libhal.a) to create a configuration-specific
final executable.
Certain definitions, however, are release/version-specific -- such as
the XTHAL_RELEASE_xxx macros (or additions made in later versions).
$Id: //depot/rel/Boreal/Xtensa/OS/target-os-src/hal.h.tpp#3 $
Copyright (c) 1999-2010 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef XTENSA_HAL_H
#define XTENSA_HAL_H
/****************************************************************************
Definitions Useful for Any Code, USER or PRIVILEGED
****************************************************************************/
/*----------------------------------------------------------------------
Constant Definitions (shared with assembly)
----------------------------------------------------------------------*/
/*
* Software (Xtensa Tools) version information. Not configuration-specific!
*
* NOTE: "release" is a misnomer here, these are really product "version"
* numbers. A "release" is a collection of product versions
* made available at once (together) to customers.
* In the past, release and version names all matched in T####.# form,
* making the distinction irrelevant. This is no longer the case.
*/
#define XTHAL_RELEASE_MAJOR 8000
#define XTHAL_RELEASE_MINOR 1
#define XTHAL_RELEASE_NAME "8.0.1"
#define XTHAL_REL_8 1
#define XTHAL_REL_8_0 1
#define XTHAL_REL_8_0_1 1
/* HAL version numbers (these names are for backward compatibility): */
#define XTHAL_MAJOR_REV XTHAL_RELEASE_MAJOR
#define XTHAL_MINOR_REV XTHAL_RELEASE_MINOR
/*
* A bit of software release/version history on values of XTHAL_{MAJOR,MINOR}_REV:
*
* SW Version MAJOR MINOR Comment
* ======= ===== ===== =======
* T1015.n n/a n/a (HAL not yet available)
* T1020.{0,1,2} 0 1 (HAL beta)
* T1020.{3,4} 0 2 First release.
* T1020.n (n>4) 0 2 or >3 (TBD)
* T1030.0 0 1 (HAL beta)
* T1030.{1,2} 0 3 Equivalent to first release.
* T1030.n (n>=3) 0 >= 3 (TBD)
* T1040.n 1040 n Full CHAL available from T1040.2
* T1050.n 1050 n .
* 6.0.n 6000 n Xtensa Tools v6 (RA-200x.n)
* 7.0.n 7000 n Xtensa Tools v7 (RB-200x.n)
* 7.1.n 7010 n Xtensa Tools v7.1 (RB-200x.(n+2))
*
*
* Note: there is a distinction between the software version with
* which something is compiled (accessible using XTHAL_RELEASE_* macros)
* and the software version with which the HAL library was compiled
* (accessible using Xthal_release_* global variables). This
* distinction is particularly relevant for vendors that distribute
* configuration-independent binaries (eg. an OS), where their customer
* might link it with a HAL of a different Xtensa software version.
* In this case, it may be appropriate for the OS to verify at run-time
* whether XTHAL_RELEASE_* and Xthal_release_* are compatible.
* [Guidelines as to which version is compatible with which are not
* currently provided explicitly, but might be inferred from reading
* OSKit documentation for all releases -- compatibility is also highly
* dependent on which HAL features are used. Each version is usually
* backward compatible, with very few exceptions if any.]
*
* Notes:
* Tornado 2.0 supported in T1020.3+, T1030.1+, and T1040.{0,1} only.
* Tornado 2.0.2 supported in T1040.2+, T1050, and 6.0.
* Compile-time HAL port of NucleusPlus supported by T1040.2 and later.
*/
/* Version comparison operators (among major/minor pairs): */
#define XTHAL_REL_GE(maja,mina, majb,minb) ((maja) > (majb) || \
((maja) == (majb) && (mina) >= (minb)))
#define XTHAL_REL_GT(maja,mina, majb,minb) ((maja) > (majb) || \
((maja) == (majb) && (mina) > (minb)))
#define XTHAL_REL_LE(maja,mina, majb,minb) ((maja) < (majb) || \
((maja) == (majb) && (mina) <= (minb)))
#define XTHAL_REL_LT(maja,mina, majb,minb) ((maja) < (majb) || \
((maja) == (majb) && (mina) < (minb)))
#define XTHAL_REL_EQ(maja,mina, majb,minb) ((maja) == (majb) && (mina) == (minb))
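/*
 * Example (illustrative): code can use these macros to guard constructs that
 * require a minimum tools version, e.g. version 7.1 (major/minor 7010/0):
 *
 *	#if XTHAL_REL_GE(XTHAL_RELEASE_MAJOR, XTHAL_RELEASE_MINOR, 7010, 0)
 *	// ... rely on 7.1-or-later behavior here ...
 *	#endif
 */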
/* Fuzzy (3-way) logic operators: */
#define XTHAL_MAYBE -1 /* 0=NO, 1=YES, -1=MAYBE */
#define XTHAL_FUZZY_AND(a,b) (((a)==0 || (b)==0) ? 0 : ((a)==1 && (b)==1) ? 1 : XTHAL_MAYBE)
#define XTHAL_FUZZY_OR(a,b) (((a)==1 || (b)==1) ? 1 : ((a)==0 && (b)==0) ? 0 : XTHAL_MAYBE)
#define XTHAL_FUZZY_NOT(a) (((a)==0 || (a)==1) ? (1-(a)) : XTHAL_MAYBE)
/*
* Architectural limit, independent of configuration:
*/
#define XTHAL_MAX_CPS 8 /* max number of coprocessors (0..7) */
/* Misc: */
#define XTHAL_LITTLEENDIAN 0
#define XTHAL_BIGENDIAN 1
#if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__)
#ifdef __cplusplus
extern "C" {
#endif
/*----------------------------------------------------------------------
HAL
----------------------------------------------------------------------*/
/* Constant to be checked in build = (XTHAL_MAJOR_REV<<16)|XTHAL_MINOR_REV */
extern const unsigned int Xthal_rev_no;
/*----------------------------------------------------------------------
Optional/Custom Processor State
----------------------------------------------------------------------*/
/* save & restore the extra processor state */
extern void xthal_save_extra(void *base);
extern void xthal_restore_extra(void *base);
extern void xthal_save_cpregs(void *base, int);
extern void xthal_restore_cpregs(void *base, int);
/* versions specific to each coprocessor id */
extern void xthal_save_cp0(void *base);
extern void xthal_save_cp1(void *base);
extern void xthal_save_cp2(void *base);
extern void xthal_save_cp3(void *base);
extern void xthal_save_cp4(void *base);
extern void xthal_save_cp5(void *base);
extern void xthal_save_cp6(void *base);
extern void xthal_save_cp7(void *base);
extern void xthal_restore_cp0(void *base);
extern void xthal_restore_cp1(void *base);
extern void xthal_restore_cp2(void *base);
extern void xthal_restore_cp3(void *base);
extern void xthal_restore_cp4(void *base);
extern void xthal_restore_cp5(void *base);
extern void xthal_restore_cp6(void *base);
extern void xthal_restore_cp7(void *base);
/* pointers to each of the functions above */
extern void* Xthal_cpregs_save_fn[XTHAL_MAX_CPS];
extern void* Xthal_cpregs_restore_fn[XTHAL_MAX_CPS];
/* similarly for non-windowed ABI (may be same or different) */
extern void* Xthal_cpregs_save_nw_fn[XTHAL_MAX_CPS];
extern void* Xthal_cpregs_restore_nw_fn[XTHAL_MAX_CPS];
/*extern void xthal_save_all_extra(void *base);*/
/*extern void xthal_restore_all_extra(void *base);*/
/* space for processor state */
extern const unsigned int Xthal_extra_size;
extern const unsigned int Xthal_extra_align;
extern const unsigned int Xthal_cpregs_size[XTHAL_MAX_CPS];
extern const unsigned int Xthal_cpregs_align[XTHAL_MAX_CPS];
extern const unsigned int Xthal_all_extra_size;
extern const unsigned int Xthal_all_extra_align;
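/*
 * Example (illustrative; 'cp_area' is a hypothetical buffer, sized and
 * aligned per Xthal_cpregs_size[cp] / Xthal_cpregs_align[cp]): saving and
 * later restoring the register state of coprocessor 'cp', e.g. around a
 * context switch:
 *
 *	xthal_save_cpregs( cp_area, cp );
 *	// ... other code that may use coprocessor 'cp' runs here ...
 *	xthal_restore_cpregs( cp_area, cp );
 */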
/* coprocessor names */
extern const char * const Xthal_cp_names[XTHAL_MAX_CPS];
/* initialize the extra processor */
/*extern void xthal_init_extra(void);*/
/* initialize the TIE coprocessor */
/*extern void xthal_init_cp(int);*/
/* initialize the extra processor */
extern void xthal_init_mem_extra(void *);
/* initialize the TIE coprocessor */
extern void xthal_init_mem_cp(void *, int);
/* the number of TIE coprocessors contiguous from zero (for Tor2) */
extern const unsigned int Xthal_num_coprocessors;
/* actual number of coprocessors */
extern const unsigned char Xthal_cp_num;
/* index of highest numbered coprocessor, plus one */
extern const unsigned char Xthal_cp_max;
/* index of highest allowed coprocessor number, per cfg, plus one */
/*extern const unsigned char Xthal_cp_maxcfg;*/
/* bitmask of which coprocessors are present */
extern const unsigned int Xthal_cp_mask;
/* read & write extra state register */
/*extern int xthal_read_extra(void *base, unsigned reg, unsigned *value);*/
/*extern int xthal_write_extra(void *base, unsigned reg, unsigned value);*/
/* read & write a TIE coprocessor register */
/*extern int xthal_read_cpreg(void *base, int cp, unsigned reg, unsigned *value);*/
/*extern int xthal_write_cpreg(void *base, int cp, unsigned reg, unsigned value);*/
/* return coprocessor number based on register */
/*extern int xthal_which_cp(unsigned reg);*/
/*----------------------------------------------------------------------
Register Windows
----------------------------------------------------------------------*/
/* number of registers in register window */
extern const unsigned int Xthal_num_aregs;
extern const unsigned char Xthal_num_aregs_log2;
/*----------------------------------------------------------------------
Cache
----------------------------------------------------------------------*/
/* size of the cache lines in log2(bytes) */
extern const unsigned char Xthal_icache_linewidth;
extern const unsigned char Xthal_dcache_linewidth;
/* size of the cache lines in bytes (2^linewidth) */
extern const unsigned short Xthal_icache_linesize;
extern const unsigned short Xthal_dcache_linesize;
/* size of the caches in bytes (ways * 2^(linewidth + setwidth)) */
extern const unsigned int Xthal_icache_size;
extern const unsigned int Xthal_dcache_size;
/* cache features */
extern const unsigned char Xthal_dcache_is_writeback;
/* invalidate the caches */
extern void xthal_icache_region_invalidate( void *addr, unsigned size );
extern void xthal_dcache_region_invalidate( void *addr, unsigned size );
extern void xthal_icache_line_invalidate(void *addr);
extern void xthal_dcache_line_invalidate(void *addr);
/* write dirty data back */
extern void xthal_dcache_region_writeback( void *addr, unsigned size );
extern void xthal_dcache_line_writeback(void *addr);
/* write dirty data back and invalidate */
extern void xthal_dcache_region_writeback_inv( void *addr, unsigned size );
extern void xthal_dcache_line_writeback_inv(void *addr);
/* sync icache and memory */
extern void xthal_icache_sync( void );
/* sync dcache and memory */
extern void xthal_dcache_sync( void );
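/*
 * Example (illustrative; buffer names and lengths are hypothetical): a common
 * pattern around a DMA transfer on a configuration with a writeback data
 * cache is to write back the outgoing region before the device reads it, and
 * to invalidate the incoming region before the CPU reads what the device wrote:
 *
 *	xthal_dcache_region_writeback( tx_buf, tx_len );	// push CPU writes to memory
 *	// ... start DMA from tx_buf; later, DMA into rx_buf completes ...
 *	xthal_dcache_region_invalidate( rx_buf, rx_len );	// drop stale cached copies
 */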
/* coherency (low-level -- not normally called directly) */
extern void xthal_cache_coherence_on( void );
extern void xthal_cache_coherence_off( void );
/* coherency (high-level) */
extern void xthal_cache_coherence_optin( void );
extern void xthal_cache_coherence_optout( void );
/* prefetch */
#define XTHAL_PREFETCH_ENABLE -1
#define XTHAL_PREFETCH_DISABLE 0
extern int xthal_set_cache_prefetch( int );
extern int xthal_get_cache_prefetch( void );
/*----------------------------------------------------------------------
Debug
----------------------------------------------------------------------*/
/* 1 if debug option configured, 0 if not: */
extern const int Xthal_debug_configured;
/* Set (plant) and remove software breakpoint, both synchronizing cache: */
extern unsigned int xthal_set_soft_break(void *addr);
extern void xthal_remove_soft_break(void *addr, unsigned int);
/*----------------------------------------------------------------------
Disassembler
----------------------------------------------------------------------*/
/* Max expected size of the return buffer for a disassembled instruction (hint only): */
#define XTHAL_DISASM_BUFSIZE 80
/* Disassembly option bits for selecting what to return: */
#define XTHAL_DISASM_OPT_ADDR 0x0001 /* display address */
#define XTHAL_DISASM_OPT_OPHEX 0x0002 /* display opcode bytes in hex */
#define XTHAL_DISASM_OPT_OPCODE 0x0004 /* display opcode name (mnemonic) */
#define XTHAL_DISASM_OPT_PARMS 0x0008 /* display parameters */
#define XTHAL_DISASM_OPT_ALL 0x0FFF /* display everything */
/* routine to get a string for the disassembled instruction */
extern int xthal_disassemble( unsigned char *instr_buf, void *tgt_addr,
char *buffer, unsigned buflen, unsigned options );
/* routine to get the size of the next instruction. Returns 0 for
illegal instruction */
extern int xthal_disassemble_size( unsigned char *instr_buf );
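/*
 * Example (illustrative; 'pc' is a hypothetical pointer to the instruction
 * bytes): rendering one instruction as text, displayed at its own address:
 *
 *	char buf[XTHAL_DISASM_BUFSIZE];
 *	unsigned char *pc = ... ;	// address of the instruction to decode
 *	(void) xthal_disassemble( pc, pc, buf, sizeof(buf), XTHAL_DISASM_OPT_ALL );
 *	// buf now holds the address, opcode bytes, mnemonic and parameters
 */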
/*----------------------------------------------------------------------
Instruction/Data RAM/ROM Access
----------------------------------------------------------------------*/
extern void* xthal_memcpy(void *dst, const void *src, unsigned len);
extern void* xthal_bcopy(const void *src, void *dst, unsigned len);
/*----------------------------------------------------------------------
MP Synchronization
----------------------------------------------------------------------*/
extern int xthal_compare_and_set( int *addr, int test_val, int compare_val );
/*extern const char Xthal_have_s32c1i;*/
/*----------------------------------------------------------------------
Miscellaneous
----------------------------------------------------------------------*/
extern const unsigned int Xthal_release_major;
extern const unsigned int Xthal_release_minor;
extern const char * const Xthal_release_name;
extern const char * const Xthal_release_internal;
extern const unsigned char Xthal_memory_order;
extern const unsigned char Xthal_have_windowed;
extern const unsigned char Xthal_have_density;
extern const unsigned char Xthal_have_booleans;
extern const unsigned char Xthal_have_loops;
extern const unsigned char Xthal_have_nsa;
extern const unsigned char Xthal_have_minmax;
extern const unsigned char Xthal_have_sext;
extern const unsigned char Xthal_have_clamps;
extern const unsigned char Xthal_have_mac16;
extern const unsigned char Xthal_have_mul16;
extern const unsigned char Xthal_have_fp;
extern const unsigned char Xthal_have_speculation;
extern const unsigned char Xthal_have_threadptr;
extern const unsigned char Xthal_have_pif;
extern const unsigned short Xthal_num_writebuffer_entries;
extern const unsigned int Xthal_build_unique_id;
/* Version info for hardware targeted by software upgrades: */
extern const unsigned int Xthal_hw_configid0;
extern const unsigned int Xthal_hw_configid1;
extern const unsigned int Xthal_hw_release_major;
extern const unsigned int Xthal_hw_release_minor;
extern const char * const Xthal_hw_release_name;
extern const char * const Xthal_hw_release_internal;
#ifdef __cplusplus
}
#endif
#endif /*!_ASMLANGUAGE && !_NOCLANGUAGE && !__ASSEMBLER__ */
/****************************************************************************
Definitions Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
****************************************************************************/
#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
/*----------------------------------------------------------------------
Constant Definitions (shared with assembly)
----------------------------------------------------------------------*/
/*
* Architectural limits, independent of configuration.
* Note that these are ISA-defined limits, not micro-architecture implementation
* limits enforced by the Xtensa Processor Generator (which may be stricter than
* these below).
*/
#define XTHAL_MAX_INTERRUPTS 32 /* max number of interrupts (0..31) */
#define XTHAL_MAX_INTLEVELS 16 /* max number of interrupt levels (0..15) */
/* (as of T1040, implementation limit is 7: 0..6) */
#define XTHAL_MAX_TIMERS 4 /* max number of timers (CCOMPARE0..CCOMPARE3) */
/* (as of T1040, implementation limit is 3: 0..2) */
/* Interrupt types: */
#define XTHAL_INTTYPE_UNCONFIGURED 0
#define XTHAL_INTTYPE_SOFTWARE 1
#define XTHAL_INTTYPE_EXTERN_EDGE 2
#define XTHAL_INTTYPE_EXTERN_LEVEL 3
#define XTHAL_INTTYPE_TIMER 4
#define XTHAL_INTTYPE_NMI 5
#define XTHAL_INTTYPE_WRITE_ERROR 6
#define XTHAL_MAX_INTTYPES 7 /* number of interrupt types */
/* Timer related: */
#define XTHAL_TIMER_UNCONFIGURED -1 /* Xthal_timer_interrupt[] value for non-existent timers */
#define XTHAL_TIMER_UNASSIGNED XTHAL_TIMER_UNCONFIGURED /* (for backwards compatibility only) */
/* Local Memory ECC/Parity: */
#define XTHAL_MEMEP_PARITY 1
#define XTHAL_MEMEP_ECC 2
/* Flags parameter to xthal_memep_inject_error(): */
#define XTHAL_MEMEP_F_LOCAL 0 /* local memory (default) */
#define XTHAL_MEMEP_F_DCACHE_DATA 4 /* data cache data */
#define XTHAL_MEMEP_F_DCACHE_TAG 5 /* data cache tag */
#define XTHAL_MEMEP_F_ICACHE_DATA 6 /* instruction cache data */
#define XTHAL_MEMEP_F_ICACHE_TAG 7 /* instruction cache tag */
#define XTHAL_MEMEP_F_CORRECTABLE 16 /* inject correctable error
(default is non-corr.) */
/* Access Mode bits (tentative): */ /* bit abbr unit short_name PPC equ - Description */
#define XTHAL_AMB_EXCEPTION 0 /* 001 E EX fls: EXception none
exception on any access (aka "illegal") */
#define XTHAL_AMB_HITCACHE 1 /* 002 C CH fls: use Cache on Hit ~(I CI)
[or H HC] way from tag match;
[or U UC] (ISA: same except Isolate case) */
#define XTHAL_AMB_ALLOCATE 2 /* 004 A AL fl?: ALlocate none
[or F FI fill] refill cache on miss, way from LRU
(ISA: Read/Write Miss Refill) */
#define XTHAL_AMB_WRITETHRU 3 /* 008 W WT --s: WriteThrough W WT
store immediately to memory (ISA: same) */
#define XTHAL_AMB_ISOLATE 4 /* 010 I IS fls: ISolate none
use cache regardless of hit-vs-miss,
way from vaddr (ISA: use-cache-on-miss+hit) */
#define XTHAL_AMB_GUARD 5 /* 020 G GU ?l?: GUard G *
non-speculative; spec/replay refs not permitted */
#define XTHAL_AMB_COHERENT 6 /* 040 M MC ?ls: Mem/MP Coherent M
on read, other CPU/bus-master may need to supply data;
on write, maybe redirect to or flush other CPU dirty line; etc */
#if 0
#define XTHAL_AMB_ORDERED x /* 000 O OR fls: ORdered G *
mem accesses cannot be out of order */
#define XTHAL_AMB_FUSEWRITES x /* 000 F FW --s: FuseWrites none
allow combining/merging/coalescing multiple writes
(to same datapath data unit) into one
(implied by writeback) */
#define XTHAL_AMB_TRUSTED x /* 000 T TR ?l?: TRusted none
memory will not bus error (if it does,
handle as fatal imprecise interrupt) */
#define XTHAL_AMB_PREFETCH x /* 000 P PR fl?: PRefetch none
on refill, read line+1 into prefetch buffers */
#define XTHAL_AMB_STREAM x /* 000 S ST ???: STreaming none
access one of N stream buffers */
#endif /*0*/
#define XTHAL_AM_EXCEPTION (1<<XTHAL_AMB_EXCEPTION)
#define XTHAL_AM_HITCACHE (1<<XTHAL_AMB_HITCACHE)
#define XTHAL_AM_ALLOCATE (1<<XTHAL_AMB_ALLOCATE)
#define XTHAL_AM_WRITETHRU (1<<XTHAL_AMB_WRITETHRU)
#define XTHAL_AM_ISOLATE (1<<XTHAL_AMB_ISOLATE)
#define XTHAL_AM_GUARD (1<<XTHAL_AMB_GUARD)
#define XTHAL_AM_COHERENT (1<<XTHAL_AMB_COHERENT)
#if 0
#define XTHAL_AM_ORDERED (1<<XTHAL_AMB_ORDERED)
#define XTHAL_AM_FUSEWRITES (1<<XTHAL_AMB_FUSEWRITES)
#define XTHAL_AM_TRUSTED (1<<XTHAL_AMB_TRUSTED)
#define XTHAL_AM_PREFETCH (1<<XTHAL_AMB_PREFETCH)
#define XTHAL_AM_STREAM (1<<XTHAL_AMB_STREAM)
#endif /*0*/
/*
* Allowed Access Modes (bit combinations).
*
* Columns are:
* "FOGIWACE"
* Access mode bits (see XTHAL_AMB_xxx above).
* <letter> = bit is set
* '-' = bit is clear
* '.' = bit is irrelevant / don't care, as follows:
* E=1 makes all others irrelevant
* W,F relevant only for stores
* "2345"
* Indicates which Xtensa releases support the corresponding
* access mode. Releases for each character column are:
* 2 = prior to T1020.2: T1015 (V1.5), T1020.0, T1020.1
* 3 = T1020.2 and later: T1020.2+, T1030
* 4 = T1040
* 5 = T1050 (maybe), LX1, LX2, LX2.1
* 7 = LX2.2
* 8 = LX3.0
* And the character column contents are:
* <number> = supported by release(s)
* "." = unsupported by release(s)
* "?" = support unknown
*/
/* FOMGIWACE 234578 */
/* For instruction fetch: */
#define XTHAL_FAM_EXCEPTION 0x001 /* ........E 234578 exception */
/*efine XTHAL_FAM_ISOLATE*/ /*0x012*/ /* .---I.-C- ...... isolate */
#define XTHAL_FAM_BYPASS 0x000 /* .----.--- 234578 bypass */
/*efine XTHAL_FAM_NACACHED*/ /*0x002*/ /* .----.-C- ...... cached no-allocate (frozen) */
#define XTHAL_FAM_CACHED 0x006 /* .----.AC- 234578 cached */
/* For data load: */
#define XTHAL_LAM_EXCEPTION 0x001 /* ........E 234578 exception */
#define XTHAL_LAM_ISOLATE 0x012 /* .---I.-C- 234578 isolate */
#define XTHAL_LAM_BYPASS 0x000 /* .O---.--- 2..... bypass speculative */
#define XTHAL_LAM_BYPASSG 0x020 /* .O-G-.--- .34578 bypass guarded */
#define XTHAL_LAM_CACHED_NOALLOC 0x002 /* .O---.-C- 234578 cached no-allocate speculative */
#define XTHAL_LAM_NACACHED XTHAL_LAM_CACHED_NOALLOC
#define XTHAL_LAM_NACACHEDG 0x022 /* .O-G-.-C- .?.... cached no-allocate guarded */
#define XTHAL_LAM_CACHED 0x006 /* .----.AC- 234578 cached speculative */
#define XTHAL_LAM_COHCACHED 0x046 /* .-M--.AC- ....*8 cached speculative MP-coherent */
/* For data store: */
#define XTHAL_SAM_EXCEPTION 0x001 /* ........E 234578 exception */
#define XTHAL_SAM_ISOLATE 0x032 /* .--GI--C- 234578 isolate */
#define XTHAL_SAM_BYPASS 0x028 /* -O-G-W--- 234578 bypass */
#define XTHAL_SAM_WRITETHRU 0x02A /* -O-G-W-C- 234578 writethrough */
/*efine XTHAL_SAM_WRITETHRU_ALLOC*/ /*0x02E*/ /* -O-G-WAC- ...... writethrough allocate */
#define XTHAL_SAM_WRITEBACK 0x026 /* F-MG--AC- ...578 writeback */
#define XTHAL_SAM_COHWRITEBACK 0x066 /* F-MG--AC- ....*8 writeback MP-coherent */
#define XTHAL_SAM_WRITEBACK_NOALLOC 0x022 /* ?--G---C- .....8 writeback no-allocate */
#if 0
/*
Cache attribute encoding for CACHEATTR (per ISA):
(Note: if this differs from ISA Ref Manual, ISA has precedence)
Inst-fetches Loads Stores
------------- ------------ -------------
0x0 FCA_EXCEPTION LCA_NACACHED SCA_WRITETHRU cached no-allocate (previously misnamed "uncached")
0x1 FCA_CACHED LCA_CACHED SCA_WRITETHRU cached
0x2 FCA_BYPASS LCA_BYPASS_G* SCA_BYPASS bypass cache (what most people call uncached)
0x3 FCA_CACHED LCA_CACHED SCA_WRITEALLOCF write-allocate
or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented)
0x4 FCA_CACHED LCA_CACHED SCA_WRITEBACK[M] write-back [MP-coherent]
or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented)
0x5 FCA_CACHED LCA_CACHED SCA_WRITEBACK_NOALLOC write-back no-allocate
or FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION (if unimplemented)
0x6..D FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION (reserved)
0xE FCA_EXCEPTION LCA_ISOLATE SCA_ISOLATE isolate
0xF FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION illegal
* Prior to T1020.2?, guard feature not supported, this defaulted to speculative (no _G)
*/
#endif /*0*/
#if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__)
#ifdef __cplusplus
extern "C" {
#endif
/*----------------------------------------------------------------------
Register Windows
----------------------------------------------------------------------*/
/* This spills any live register windows (other than the caller's).
 * (NOTE: the current implementation requires privileged code, but
 * a user-callable implementation is possible.) */
extern void xthal_window_spill( void );
/*----------------------------------------------------------------------
Optional/Custom Processor State
----------------------------------------------------------------------*/
/* validate & invalidate the TIE register file */
extern void xthal_validate_cp(int);
extern void xthal_invalidate_cp(int);
/* read and write cpenable register */
extern void xthal_set_cpenable(unsigned);
extern unsigned xthal_get_cpenable(void);
/*----------------------------------------------------------------------
Interrupts
----------------------------------------------------------------------*/
/* the number of interrupt levels */
extern const unsigned char Xthal_num_intlevels;
/* the number of interrupts */
extern const unsigned char Xthal_num_interrupts;
/* mask for level of interrupts */
extern const unsigned int Xthal_intlevel_mask[XTHAL_MAX_INTLEVELS];
/* mask for level 0 to N interrupts */
extern const unsigned int Xthal_intlevel_andbelow_mask[XTHAL_MAX_INTLEVELS];
/* level of each interrupt */
extern const unsigned char Xthal_intlevel[XTHAL_MAX_INTERRUPTS];
/* type per interrupt */
extern const unsigned char Xthal_inttype[XTHAL_MAX_INTERRUPTS];
/* masks of each type of interrupt */
extern const unsigned int Xthal_inttype_mask[XTHAL_MAX_INTTYPES];
/* interrupt numbers assigned to each timer interrupt */
extern const int Xthal_timer_interrupt[XTHAL_MAX_TIMERS];
/* INTENABLE,INTERRUPT,INTSET,INTCLEAR register access functions: */
extern unsigned xthal_get_intenable( void );
extern void xthal_set_intenable( unsigned );
extern unsigned xthal_get_interrupt( void );
#define xthal_get_intread xthal_get_interrupt /* backward compatibility */
extern void xthal_set_intset( unsigned );
extern void xthal_set_intclear( unsigned );
/*----------------------------------------------------------------------
Debug
----------------------------------------------------------------------*/
/* Number of instruction and data break registers: */
extern const int Xthal_num_ibreak;
extern const int Xthal_num_dbreak;
/*----------------------------------------------------------------------
Core Counter
----------------------------------------------------------------------*/
/* counter info */
extern const unsigned char Xthal_have_ccount; /* set if CCOUNT register present */
extern const unsigned char Xthal_num_ccompare; /* number of CCOMPAREn registers */
/* get CCOUNT register (if not present return 0) */
extern unsigned xthal_get_ccount(void);
/* set and get CCOMPAREn registers (if not present, get returns 0) */
extern void xthal_set_ccompare(int, unsigned);
extern unsigned xthal_get_ccompare(int);
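/*
 * Example (illustrative; the timer index and period are arbitrary):
 * programming CCOMPARE0 for a periodic tick relative to the current CCOUNT:
 *
 *	#define TICK_CYCLES  1000000	// hypothetical period, in CCOUNT cycles
 *	xthal_set_ccompare( 0, xthal_get_ccount() + TICK_CYCLES );
 *	// ... in the timer interrupt handler, re-arm the next tick:
 *	xthal_set_ccompare( 0, xthal_get_ccompare(0) + TICK_CYCLES );
 */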
/*----------------------------------------------------------------------
Miscellaneous
----------------------------------------------------------------------*/
extern const unsigned char Xthal_have_prid;
extern const unsigned char Xthal_have_exceptions;
extern const unsigned char Xthal_xea_version;
extern const unsigned char Xthal_have_interrupts;
extern const unsigned char Xthal_have_highlevel_interrupts;
extern const unsigned char Xthal_have_nmi;
extern unsigned xthal_get_prid( void );
/*----------------------------------------------------------------------
Virtual interrupt prioritization (DEPRECATED)
----------------------------------------------------------------------*/
/* Convert between interrupt levels (as per PS.INTLEVEL) and virtual interrupt priorities: */
extern unsigned xthal_vpri_to_intlevel(unsigned vpri);
extern unsigned xthal_intlevel_to_vpri(unsigned intlevel);
/* Enables/disables given set (mask) of interrupts; returns previous enabled-mask of all ints: */
extern unsigned xthal_int_enable(unsigned);
extern unsigned xthal_int_disable(unsigned);
/* Set/get virtual priority of an interrupt: */
extern int xthal_set_int_vpri(int intnum, int vpri);
extern int xthal_get_int_vpri(int intnum);
/* Set/get interrupt lockout level for exclusive access to virtual priority data structures: */
extern void xthal_set_vpri_locklevel(unsigned intlevel);
extern unsigned xthal_get_vpri_locklevel(void);
/* Set/get current virtual interrupt priority: */
extern unsigned xthal_set_vpri(unsigned vpri);
extern unsigned xthal_get_vpri(void);
extern unsigned xthal_set_vpri_intlevel(unsigned intlevel);
extern unsigned xthal_set_vpri_lock(void);
/*----------------------------------------------------------------------
Generic Interrupt Trampolining Support (DEPRECATED)
----------------------------------------------------------------------*/
typedef void (XtHalVoidFunc)(void);
/* Bitmask of interrupts currently trampolining down: */
extern unsigned Xthal_tram_pending;
/*
* Bitmask of which interrupts currently trampolining down synchronously are
* actually enabled; this bitmask is necessary because INTENABLE cannot hold
* that state (sync-trampolining interrupts must be kept disabled while
* trampolining); in the current implementation, any bit set here is not set
* in INTENABLE, and vice-versa; once a sync-trampoline is handled (at level
* one), its enable bit must be moved from here to INTENABLE:
*/
extern unsigned Xthal_tram_enabled;
/* Bitmask of interrupts configured for sync trampolining: */
extern unsigned Xthal_tram_sync;
/* Trampoline support functions: */
extern unsigned xthal_tram_pending_to_service( void );
extern void xthal_tram_done( unsigned serviced_mask );
extern int xthal_tram_set_sync( int intnum, int sync );
extern XtHalVoidFunc* xthal_set_tram_trigger_func( XtHalVoidFunc *trigger_fn );
/*----------------------------------------------------------------------
Internal Memories
----------------------------------------------------------------------*/
extern const unsigned char Xthal_num_instrom;
extern const unsigned char Xthal_num_instram;
extern const unsigned char Xthal_num_datarom;
extern const unsigned char Xthal_num_dataram;
extern const unsigned char Xthal_num_xlmi;
/* Each of the following arrays contains at least one entry,
* or as many entries as needed if more than one: */
extern const unsigned int Xthal_instrom_vaddr[];
extern const unsigned int Xthal_instrom_paddr[];
extern const unsigned int Xthal_instrom_size [];
extern const unsigned int Xthal_instram_vaddr[];
extern const unsigned int Xthal_instram_paddr[];
extern const unsigned int Xthal_instram_size [];
extern const unsigned int Xthal_datarom_vaddr[];
extern const unsigned int Xthal_datarom_paddr[];
extern const unsigned int Xthal_datarom_size [];
extern const unsigned int Xthal_dataram_vaddr[];
extern const unsigned int Xthal_dataram_paddr[];
extern const unsigned int Xthal_dataram_size [];
extern const unsigned int Xthal_xlmi_vaddr[];
extern const unsigned int Xthal_xlmi_paddr[];
extern const unsigned int Xthal_xlmi_size [];
/*----------------------------------------------------------------------
Cache
----------------------------------------------------------------------*/
/* number of cache sets in log2(lines per way) */
extern const unsigned char Xthal_icache_setwidth;
extern const unsigned char Xthal_dcache_setwidth;
/* cache set associativity (number of ways) */
extern const unsigned int Xthal_icache_ways;
extern const unsigned int Xthal_dcache_ways;
/* cache features */
extern const unsigned char Xthal_icache_line_lockable;
extern const unsigned char Xthal_dcache_line_lockable;
/* cache attribute register control (used by other HAL routines) */
extern unsigned xthal_get_cacheattr( void );
extern unsigned xthal_get_icacheattr( void );
extern unsigned xthal_get_dcacheattr( void );
extern void xthal_set_cacheattr( unsigned );
extern void xthal_set_icacheattr( unsigned );
extern void xthal_set_dcacheattr( unsigned );
/* set cache attribute (access modes) for a range of memory */
extern int xthal_set_region_attribute( void *addr, unsigned size,
unsigned cattr, unsigned flags );
/* Bits of flags parameter to xthal_set_region_attribute(): */
#define XTHAL_CAFLAG_EXPAND 0x000100 /* only expand allowed access to range, don't reduce it */
#define XTHAL_CAFLAG_EXACT 0x000200 /* return error if can't apply change to exact range specified */
#define XTHAL_CAFLAG_NO_PARTIAL 0x000400 /* don't apply change to regions partially covered by range */
#define XTHAL_CAFLAG_NO_AUTO_WB 0x000800 /* don't writeback data after leaving writeback attribute */
#define XTHAL_CAFLAG_NO_AUTO_INV 0x001000 /* don't invalidate after disabling cache (entering bypass) */
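/*
 * Example (illustrative; 'region_base', 'region_size' and 'cattr' are
 * placeholders -- the attribute encoding is configuration-dependent):
 *
 *	int rc = xthal_set_region_attribute( region_base, region_size, cattr,
 *	                                     XTHAL_CAFLAG_NO_PARTIAL | XTHAL_CAFLAG_EXACT );
 *	// check rc for an error indication (e.g. the exact range could not be covered)
 */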
/* enable caches */
extern void xthal_icache_enable( void ); /* DEPRECATED */
extern void xthal_dcache_enable( void ); /* DEPRECATED */
/* disable caches */
extern void xthal_icache_disable( void ); /* DEPRECATED */
extern void xthal_dcache_disable( void ); /* DEPRECATED */
/* invalidate the caches */
extern void xthal_icache_all_invalidate( void );
extern void xthal_dcache_all_invalidate( void );
/* write dirty data back */
extern void xthal_dcache_all_writeback( void );
/* write dirty data back and invalidate */
extern void xthal_dcache_all_writeback_inv( void );
/* prefetch and lock specified memory range into cache */
extern void xthal_icache_region_lock( void *addr, unsigned size );
extern void xthal_dcache_region_lock( void *addr, unsigned size );
extern void xthal_icache_line_lock(void *addr);
extern void xthal_dcache_line_lock(void *addr);
/* unlock from cache */
extern void xthal_icache_all_unlock( void );
extern void xthal_dcache_all_unlock( void );
extern void xthal_icache_region_unlock( void *addr, unsigned size );
extern void xthal_dcache_region_unlock( void *addr, unsigned size );
extern void xthal_icache_line_unlock(void *addr);
extern void xthal_dcache_line_unlock(void *addr);
/*----------------------------------------------------------------------
Local Memory ECC/Parity
----------------------------------------------------------------------*/
/* Inject memory errors; flags is bit combination of XTHAL_MEMEP_F_xxx: */
extern void xthal_memep_inject_error(void *addr, int size, int flags);
/*----------------------------------------------------------------------
Memory Management Unit
----------------------------------------------------------------------*/
extern const unsigned char Xthal_have_spanning_way;
extern const unsigned char Xthal_have_identity_map;
extern const unsigned char Xthal_have_mimic_cacheattr;
extern const unsigned char Xthal_have_xlt_cacheattr;
extern const unsigned char Xthal_have_cacheattr;
extern const unsigned char Xthal_have_tlbs;
extern const unsigned char Xthal_mmu_asid_bits; /* 0 .. 8 */
extern const unsigned char Xthal_mmu_asid_kernel;
extern const unsigned char Xthal_mmu_rings; /* 1 .. 4 (perhaps 0 if no MMU and/or no protection?) */
extern const unsigned char Xthal_mmu_ring_bits;
extern const unsigned char Xthal_mmu_sr_bits;
extern const unsigned char Xthal_mmu_ca_bits;
extern const unsigned int Xthal_mmu_max_pte_page_size;
extern const unsigned int Xthal_mmu_min_pte_page_size;
extern const unsigned char Xthal_itlb_way_bits;
extern const unsigned char Xthal_itlb_ways;
extern const unsigned char Xthal_itlb_arf_ways;
extern const unsigned char Xthal_dtlb_way_bits;
extern const unsigned char Xthal_dtlb_ways;
extern const unsigned char Xthal_dtlb_arf_ways;
/* Convert between virtual and physical addresses (through static maps only): */
/*** WARNING: these two functions may go away in a future release; don't depend on them! ***/
extern int xthal_static_v2p( unsigned vaddr, unsigned *paddrp );
extern int xthal_static_p2v( unsigned paddr, unsigned *vaddrp, unsigned cached );
#ifdef __cplusplus
}
#endif
#endif /*!_ASMLANGUAGE && !_NOCLANGUAGE && !__ASSEMBLER__ */
#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
/****************************************************************************
EXPERIMENTAL and DEPRECATED Definitions
****************************************************************************/
#if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__)
#ifdef __cplusplus
extern "C" {
#endif
#ifdef INCLUDE_DEPRECATED_HAL_CODE
extern const unsigned char Xthal_have_old_exc_arch;
extern const unsigned char Xthal_have_mmu;
extern const unsigned int Xthal_num_regs;
extern const unsigned char Xthal_num_iroms;
extern const unsigned char Xthal_num_irams;
extern const unsigned char Xthal_num_droms;
extern const unsigned char Xthal_num_drams;
extern const unsigned int Xthal_configid0;
extern const unsigned int Xthal_configid1;
#endif
#ifdef INCLUDE_DEPRECATED_HAL_DEBUG_CODE
#define XTHAL_24_BIT_BREAK 0x80000000
#define XTHAL_16_BIT_BREAK 0x40000000
extern const unsigned short Xthal_ill_inst_16[16];
#define XTHAL_DEST_REG 0xf0000000 /* Mask for destination register */
#define XTHAL_DEST_REG_INST 0x08000000 /* Branch address is in register */
#define XTHAL_DEST_REL_INST 0x04000000 /* Branch address is relative */
#define XTHAL_RFW_INST 0x00000800
#define XTHAL_RFUE_INST 0x00000400
#define XTHAL_RFI_INST 0x00000200
#define XTHAL_RFE_INST 0x00000100
#define XTHAL_RET_INST 0x00000080
#define XTHAL_BREAK_INST 0x00000040
#define XTHAL_SYSCALL_INST 0x00000020
#define XTHAL_LOOP_END 0x00000010 /* Not set by xthal_inst_type */
#define XTHAL_JUMP_INST 0x00000008 /* Call or jump instruction */
#define XTHAL_BRANCH_INST 0x00000004 /* Branch instruction */
#define XTHAL_24_BIT_INST 0x00000002
#define XTHAL_16_BIT_INST 0x00000001
typedef struct xthal_state {
unsigned pc;
unsigned ar[16];
unsigned lbeg;
unsigned lend;
unsigned lcount;
unsigned extra_ptr;
unsigned cpregs_ptr[XTHAL_MAX_CPS];
} XTHAL_STATE;
extern unsigned int xthal_inst_type(void *addr);
extern unsigned int xthal_branch_addr(void *addr);
extern unsigned int xthal_get_npc(XTHAL_STATE *user_state);
#endif /* INCLUDE_DEPRECATED_HAL_DEBUG_CODE */
#ifdef __cplusplus
}
#endif
#endif /*!_ASMLANGUAGE && !_NOCLANGUAGE && !__ASSEMBLER__ */
#endif /*XTENSA_HAL_H*/

View File

@ -0,0 +1,60 @@
/* Copyright (c) 2004-2006 by Tensilica Inc. ALL RIGHTS RESERVED.
/ These coded instructions, statements, and computer programs are the
/ copyrighted works and confidential proprietary information of Tensilica Inc.
/ They may not be modified, copied, reproduced, distributed, or disclosed to
/ third parties in any manner, medium, or form, in whole or in part, without
/ the prior written consent of Tensilica Inc.
*/
/* sim.h
*
* Definitions and prototypes for specific ISS SIMCALLs
* (ie. outside the standard C library).
*/
#ifndef _INC_SIM_H_
#define _INC_SIM_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Shortcuts for enabling/disabling profiling in the Xtensa ISS */
extern void xt_iss_profile_enable(void);
extern void xt_iss_profile_disable(void);
/* Shortcut for setting the trace level in the Xtensa ISS */
extern void xt_iss_trace_level(unsigned level);
/* Generic interface for passing client commands in the Xtensa ISS:
* returns 0 on success, -1 on failure.
*/
extern int xt_iss_client_command(const char *client, const char *command);
/* Interface for switching simulation modes in the Xtensa ISS:
* returns 0 on success, -1 on failure.
*/
#define XT_ISS_CYCLE_ACCURATE 0
#define XT_ISS_FUNCTIONAL 1
extern int xt_iss_switch_mode(int mode);
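/* Example (illustrative; the init routine is hypothetical): skip cycle-accurate
 * modeling during a lengthy initialization phase, then switch back for the
 * region being measured:
 *
 *	(void) xt_iss_switch_mode( XT_ISS_FUNCTIONAL );
 *	run_boot_init();
 *	(void) xt_iss_switch_mode( XT_ISS_CYCLE_ACCURATE );
 */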
/* Interface for waiting on a system synchronization event */
extern void xt_iss_event_wait(unsigned event_id);
/* Interface for firing a system synchronization event */
extern void xt_iss_event_fire(unsigned event_id);
/* Interface for invoking a user simcall action,
* which can be registered in XTMP or XTSC.
*/
extern int xt_iss_simcall(int arg1, int arg2, int arg3,
int arg4, int arg5, int arg6);
#ifdef __cplusplus
}
#endif
#endif /*_INC_SIM_H_*/

View File

@ -0,0 +1,139 @@
/* Error numbers for Xtensa ISS semihosting. */
/* Copyright (c) 2003 by Tensilica Inc. ALL RIGHTS RESERVED.
These coded instructions, statements, and computer programs are the
copyrighted works and confidential proprietary information of Tensilica Inc.
They may not be modified, copied, reproduced, distributed, or disclosed to
third parties in any manner, medium, or form, in whole or in part, without
the prior written consent of Tensilica Inc. */
#ifndef _SIMCALL_ERRNO_H
#define _SIMCALL_ERRNO_H
/* Define the error numbers (using the default newlib values) with prefixes
so they can be used in ISS without conflicting with the host values. */
#define _SIMC_EPERM 1
#define _SIMC_ENOENT 2
#define _SIMC_ESRCH 3
#define _SIMC_EINTR 4
#define _SIMC_EIO 5
#define _SIMC_ENXIO 6
#define _SIMC_E2BIG 7
#define _SIMC_ENOEXEC 8
#define _SIMC_EBADF 9
#define _SIMC_ECHILD 10
#define _SIMC_EAGAIN 11
#define _SIMC_ENOMEM 12
#define _SIMC_EACCES 13
#define _SIMC_EFAULT 14
#define _SIMC_ENOTBLK 15
#define _SIMC_EBUSY 16
#define _SIMC_EEXIST 17
#define _SIMC_EXDEV 18
#define _SIMC_ENODEV 19
#define _SIMC_ENOTDIR 20
#define _SIMC_EISDIR 21
#define _SIMC_EINVAL 22
#define _SIMC_ENFILE 23
#define _SIMC_EMFILE 24
#define _SIMC_ENOTTY 25
#define _SIMC_ETXTBSY 26
#define _SIMC_EFBIG 27
#define _SIMC_ENOSPC 28
#define _SIMC_ESPIPE 29
#define _SIMC_EROFS 30
#define _SIMC_EMLINK 31
#define _SIMC_EPIPE 32
#define _SIMC_EDOM 33
#define _SIMC_ERANGE 34
#define _SIMC_ENOMSG 35
#define _SIMC_EIDRM 36
#define _SIMC_ECHRNG 37
#define _SIMC_EL2NSYNC 38
#define _SIMC_EL3HLT 39
#define _SIMC_EL3RST 40
#define _SIMC_ELNRNG 41
#define _SIMC_EUNATCH 42
#define _SIMC_ENOCSI 43
#define _SIMC_EL2HLT 44
#define _SIMC_EDEADLK 45
#define _SIMC_ENOLCK 46
#define _SIMC_EBADE 50
#define _SIMC_EBADR 51
#define _SIMC_EXFULL 52
#define _SIMC_ENOANO 53
#define _SIMC_EBADRQC 54
#define _SIMC_EBADSLT 55
#define _SIMC_EDEADLOCK 56
#define _SIMC_EBFONT 57
#define _SIMC_ENOSTR 60
#define _SIMC_ENODATA 61
#define _SIMC_ETIME 62
#define _SIMC_ENOSR 63
#define _SIMC_ENONET 64
#define _SIMC_ENOPKG 65
#define _SIMC_EREMOTE 66
#define _SIMC_ENOLINK 67
#define _SIMC_EADV 68
#define _SIMC_ESRMNT 69
#define _SIMC_ECOMM 70
#define _SIMC_EPROTO 71
#define _SIMC_EMULTIHOP 74
#define _SIMC_ELBIN 75
#define _SIMC_EDOTDOT 76
#define _SIMC_EBADMSG 77
#define _SIMC_EFTYPE 79
#define _SIMC_ENOTUNIQ 80
#define _SIMC_EBADFD 81
#define _SIMC_EREMCHG 82
#define _SIMC_ELIBACC 83
#define _SIMC_ELIBBAD 84
#define _SIMC_ELIBSCN 85
#define _SIMC_ELIBMAX 86
#define _SIMC_ELIBEXEC 87
#define _SIMC_ENOSYS 88
#define _SIMC_ENMFILE 89
#define _SIMC_ENOTEMPTY 90
#define _SIMC_ENAMETOOLONG 91
#define _SIMC_ELOOP 92
#define _SIMC_EOPNOTSUPP 95
#define _SIMC_EPFNOSUPPORT 96
#define _SIMC_ECONNRESET 104
#define _SIMC_ENOBUFS 105
#define _SIMC_EAFNOSUPPORT 106
#define _SIMC_EPROTOTYPE 107
#define _SIMC_ENOTSOCK 108
#define _SIMC_ENOPROTOOPT 109
#define _SIMC_ESHUTDOWN 110
#define _SIMC_ECONNREFUSED 111
#define _SIMC_EADDRINUSE 112
#define _SIMC_ECONNABORTED 113
#define _SIMC_ENETUNREACH 114
#define _SIMC_ENETDOWN 115
#define _SIMC_ETIMEDOUT 116
#define _SIMC_EHOSTDOWN 117
#define _SIMC_EHOSTUNREACH 118
#define _SIMC_EINPROGRESS 119
#define _SIMC_EALREADY 120
#define _SIMC_EDESTADDRREQ 121
#define _SIMC_EMSGSIZE 122
#define _SIMC_EPROTONOSUPPORT 123
#define _SIMC_ESOCKTNOSUPPORT 124
#define _SIMC_EADDRNOTAVAIL 125
#define _SIMC_ENETRESET 126
#define _SIMC_EISCONN 127
#define _SIMC_ENOTCONN 128
#define _SIMC_ETOOMANYREFS 129
#define _SIMC_EPROCLIM 130
#define _SIMC_EUSERS 131
#define _SIMC_EDQUOT 132
#define _SIMC_ESTALE 133
#define _SIMC_ENOTSUP 134
#define _SIMC_ENOMEDIUM 135
#define _SIMC_ENOSHARE 136
#define _SIMC_ECASECLASH 137
#define _SIMC_EILSEQ 138
#define _SIMC_EOVERFLOW 139
#endif /* ! _SIMCALL_ERRNO_H */

View File

@ -0,0 +1,21 @@
/* File control operations for Xtensa ISS semihosting. */
/* Copyright (c) 2003 by Tensilica Inc. ALL RIGHTS RESERVED.
These coded instructions, statements, and computer programs are the
copyrighted works and confidential proprietary information of Tensilica Inc.
They may not be modified, copied, reproduced, distributed, or disclosed to
third parties in any manner, medium, or form, in whole or in part, without
the prior written consent of Tensilica Inc. */
#ifndef _SIMCALL_FCNTL_H
#define _SIMCALL_FCNTL_H
#define _SIMC_O_APPEND 0x0008
#define _SIMC_O_NONBLOCK 0x0080
#define _SIMC_O_CREAT 0x0100
#define _SIMC_O_TRUNC 0x0200
#define _SIMC_O_EXCL 0x0400
#define _SIMC_O_TEXT 0x4000
#define _SIMC_O_BINARY 0x8000
#endif /* ! _SIMCALL_FCNTL_H */

View File

@ -0,0 +1,188 @@
/*
* simcall.h - Simulator call numbers
*
* Software that runs on a simulated Xtensa processor using
* the instruction set simulator (ISS) can invoke simulator
* services using the SIMCALL instruction. The a2 register
* is set prior to executing SIMCALL to a "simcall number",
* indicating which service to invoke. This file defines the
* simcall numbers defined and/or supported by the Xtensa ISS.
*
* IMPORTANT NOTE: These numbers are highly subject to change!
*
* Copyright (c) 2002-2007 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
#ifndef SIMCALL_INCLUDED
#define SIMCALL_INCLUDED
/*
* System call like services offered by the simulator host.
* These are modeled after the Linux 2.4 kernel system calls
* for Xtensa processors. However, not all system calls, and not
* all of the functionality of a given system call, are implemented,
* or necessarily have well-defined or equivalent semantics in
* the context of a simulation (as opposed to a Unix kernel).
*
* These services behave largely as if they had been invoked
* as a task in the simulator host's operating system
* (eg. files accessed are those of the simulator host).
* However, these SIMCALLs model a virtual operating system
* so that various definitions, bit assignments etc
* (eg. open mode bits, errno values, etc) are independent
* of the host operating system used to run the simulation.
* Rather these definitions are specific to the Xtensa ISS.
* This way Xtensa ISA code written to use these SIMCALLs
* can (in principle) be simulated on any host.
*
* Up to 6 parameters are passed in registers a3 to a8
* (note the 6th parameter isn't passed on the stack,
* unlike windowed function calling conventions).
* The return value is in a2. A negative value in the
* range -4096 to -1 indicates a negated error code to be
* reported in errno with a return value of -1, otherwise
* the value in a2 is returned as is.
*/
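/*
 * Illustrative sketch (an editorial addition, not part of the Tensilica
 * header): one way a hosted C library could wrap a SIMCALL following the
 * convention described above -- a2 holds the simcall number, a3..a5 the
 * first parameters, and the result comes back in a2.  The helper name
 * simc_write and the GCC local-register-variable idiom are assumptions.
 */
#if 0
static inline int simc_write(int fd, const void *buf, int len)
{
    register int         a2 __asm__ ("a2") = SYS_write;  /* service number */
    register int         a3 __asm__ ("a3") = fd;         /* 1st parameter  */
    register const void *a4 __asm__ ("a4") = buf;        /* 2nd parameter  */
    register int         a5 __asm__ ("a5") = len;        /* 3rd parameter  */

    __asm__ __volatile__ ("simcall"
                          : "+r" (a2)
                          : "r" (a3), "r" (a4), "r" (a5)
                          : "memory");

    if (a2 < 0 && a2 >= -4096) {        /* negated error code, see above */
        /* errno = -a2; */
        return -1;
    }
    return a2;                          /* number of bytes written */
}
#endif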
/* These #defines need to match what's in Xtensa/OS/vxworks/xtiss/simcalls.c */
#define SYS_nop 0 /* n/a - setup; used to flush register windows */
#define SYS_exit 1 /*x*/
#define SYS_fork 2
#define SYS_read 3 /*x*/
#define SYS_write 4 /*x*/
#define SYS_open 5 /*x*/
#define SYS_close 6 /*x*/
#define SYS_rename 7 /*x 38 - waitpid */
#define SYS_creat 8 /*x*/
#define SYS_link 9 /*x (not implemented on WIN32) */
#define SYS_unlink 10 /*x*/
#define SYS_execv 11 /* n/a - execve */
#define SYS_execve 12 /* 11 - chdir */
#define SYS_pipe 13 /* 42 - time */
#define SYS_stat 14 /* 106 - mknod */
#define SYS_chmod 15
#define SYS_chown 16 /* 202 - lchown */
#define SYS_utime 17 /* 30 - break */
#define SYS_wait 18 /* n/a - oldstat */
#define SYS_lseek 19 /*x*/
#define SYS_getpid 20
#define SYS_isatty 21 /* n/a - mount */
#define SYS_fstat 22 /* 108 - oldumount */
#define SYS_time 23 /* 13 - setuid */
#define SYS_gettimeofday 24 /*x 78 - getuid (not implemented on WIN32) */
#define SYS_times 25 /*X 43 - stime (Xtensa-specific implementation) */
#define SYS_socket 26
#define SYS_sendto 27
#define SYS_recvfrom 28
#define SYS_select_one 29 /* not a compatible select(); handles one file descriptor at a time */
#define SYS_bind 30
#define SYS_ioctl 31
/*
* Other...
*/
#define SYS_iss_argc 1000 /* returns value of argc */
#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */
#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */
#define SYS_memset 1004 /* fill a range of memory (fast) */
/*
* SIMCALLs for the ferret memory debugger. All are invoked by
* libferret.a ... ( Xtensa/Target-Libs/ferret )
*/
#define SYS_ferret 1010
#define SYS_malloc 1011
#define SYS_free 1012
#define SYS_more_heap 1013
#define SYS_no_heap 1014
#define SYS_enter_ferret 1015
#define SYS_leave_ferret 1016
/*
* SIMCALLs for ISS client commands
*/
#define SYS_profile_enable 1020
#define SYS_profile_disable 1021
#define SYS_trace_level 1022
#define SYS_client_command 1023
/*
* SIMCALL for simulation mode switching
*/
#define SYS_sim_mode_switch 1030
/*
* SIMCALLs for XTMP/XTSC event notify and core stall
*/
#define SYS_event_fire 1040
#define SYS_event_stall 1041
/*
* SIMCALLs for callbacks registered in XTMP/XTSC
*/
#define SYS_callback_first 100
#define SYS_callback_last 999
/*
* User defined simcall
*/
#define SYS_user_simcall 100
#define SYS_xmpa_errinfo 200
#define SYS_xmpa_proc_status 201
#define SYS_xmpa_proc_start 202
#define SYS_xmpa_proc_stop 203
#define SYS_xmpa_proc_mem_read 204
#define SYS_xmpa_proc_mem_write 205
#define SYS_xmpa_proc_mem_fill 206
#define SYS_xmpa_proc_reg_read 207
#define SYS_xmpa_proc_reg_write 208
/*
* Extra SIMCALLs for GDB:
*/
#define SYS_gdb_break -1 /* invoked by XTOS on user exceptions if EPC points
to a break.n/break, regardless of cause! */
#define SYS_xmon_out -2 /* invoked by XMON: ... */
#define SYS_xmon_in -3 /* invoked by XMON: ... */
#define SYS_xmon_flush -4 /* invoked by XMON: ... */
#define SYS_gdb_abort -5 /* invoked by XTOS in _xtos_panic() */
#define SYS_gdb_illegal_inst -6 /* invoked by XTOS for illegal instructions (too deeply) */
#define SYS_xmon_init -7 /* invoked by XMON: ... */
#define SYS_gdb_enter_sktloop -8 /* invoked by XTOS on debug exceptions */
#define SYS_unhandled_kernel_exc -9 /* invoked by XTOS for unhandled kernel exceptions */
#define SYS_unhandled_user_exc -10 /* invoked by XTOS for unhandled user exceptions */
#define SYS_unhandled_double_exc -11 /* invoked by XTOS for unhandled double exceptions */
#define SYS_unhandled_highpri_interrupt -12 /* invoked by XTOS for unhandled high-priority interrupts */
/*
* SIMCALLs for vxWorks xtiss BSP:
*/
#define SYS_setup_ppp_pipes -83
#define SYS_log_msg -84
/*
* SYS_select_one specifiers
*/
#define XTISS_SELECT_ONE_READ 1
#define XTISS_SELECT_ONE_WRITE 2
#define XTISS_SELECT_ONE_EXCEPT 3
/*
* SIMCALL for client calling arbitrary code in a client plug in.
* see clients/xcc_instr to see how this works.
*/
#define SYS_client 0xC0DECAFE
#endif /* !SIMCALL_INCLUDED */

View File

@ -0,0 +1,138 @@
/*
* Xtensa Special Register symbolic names
*/
/* $Id: //depot/rel/Boreal/Xtensa/OS/include/xtensa/specreg.h#2 $ */
/*
* Copyright (c) 2005-2010 Tensilica Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef XTENSA_SPECREG_H
#define XTENSA_SPECREG_H
/* Special registers: */
#define LBEG 0
#define LEND 1
#define LCOUNT 2
#define SAR 3
#define BR 4
#define LITBASE 5
#define SCOMPARE1 12
#define ACCLO 16
#define ACCHI 17
#define MR_0 32
#define MR_1 33
#define MR_2 34
#define MR_3 35
#define PREFCTL 40
#define WINDOWBASE 72
#define WINDOWSTART 73
#define PTEVADDR 83
#define RASID 90
#define ITLBCFG 91
#define DTLBCFG 92
#define IBREAKENABLE 96
#define CACHEATTR 98
#define DDR 104
#define IBREAKA_0 128
#define IBREAKA_1 129
#define DBREAKA_0 144
#define DBREAKA_1 145
#define DBREAKC_0 160
#define DBREAKC_1 161
#define EPC_1 177
#define EPC_2 178
#define EPC_3 179
#define EPC_4 180
#define EPC_5 181
#define EPC_6 182
#define EPC_7 183
#define DEPC 192
#define EPS_2 194
#define EPS_3 195
#define EPS_4 196
#define EPS_5 197
#define EPS_6 198
#define EPS_7 199
#define EXCSAVE_1 209
#define EXCSAVE_2 210
#define EXCSAVE_3 211
#define EXCSAVE_4 212
#define EXCSAVE_5 213
#define EXCSAVE_6 214
#define EXCSAVE_7 215
#define CPENABLE 224
#define INTERRUPT 226
#define INTREAD INTERRUPT /* alternate name for backward compatibility */
#define INTSET INTERRUPT /* alternate name for backward compatibility */
#define INTCLEAR 227
#define INTENABLE 228
#define PS 230
#define VECBASE 231
#define EXCCAUSE 232
#define DEBUGCAUSE 233
#define CCOUNT 234
#define PRID 235
#define ICOUNT 236
#define ICOUNTLEVEL 237
#define EXCVADDR 238
#define CCOMPARE_0 240
#define CCOMPARE_1 241
#define CCOMPARE_2 242
#define MISC_REG_0 244
#define MISC_REG_1 245
#define MISC_REG_2 246
#define MISC_REG_3 247
/* Special cases (bases of special register series): */
#define MR 32
#define IBREAKA 128
#define DBREAKA 144
#define DBREAKC 160
#define EPC 176
#define EPS 192
#define EXCSAVE 208
#define CCOMPARE 240
#define MISC_REG 244
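/* Example (editorial addition): numbered members of a series are the base
   plus the index, e.g. EPC_3 == EPC + 3 == 179 and CCOMPARE_1 ==
   CCOMPARE + 1 == 241, which is what lets assembly code iterate over a
   configurable number of exception levels with "rsr aN, EPC + level". */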
/* Tensilica-defined user registers: */
#if 0
/*#define ... 21..24 */ /* (545CK) */
/*#define ... 140..143 */ /* (545CK) */
#define EXPSTATE 230 /* Diamond */
#define THREADPTR 231 /* threadptr option */
#define FCR 232 /* FPU */
#define FSR 233 /* FPU */
#define AE_OVF_SAR 240 /* HiFi2 */
#define AE_BITHEAD 241 /* HiFi2 */
#define AE_TS_FTS_BU_BP 242 /* HiFi2 */
#define AE_SD_NO 243 /* HiFi2 */
#define VSAR 240 /* VectraLX */
#define ROUND_LO 242 /* VectraLX */
#define ROUND_HI 243 /* VectraLX */
#define CBEGIN 246 /* VectraLX */
#define CEND 247 /* VectraLX */
#endif
#endif /* XTENSA_SPECREG_H */

View File

@ -0,0 +1,24 @@
/* Definitions for the 32-bit Integer Multiply Option. */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2009 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* NOTE: This file exists only for backward compatibility with RB-200X.x
and earlier Xtensa releases. Starting with RC-2009.0 you should use
<xtensa/tie/xt_mul.h>. */
#ifndef _XTENSA_xt_MUL32_HEADER
#define _XTENSA_xt_MUL32_HEADER
#ifdef __XTENSA__
#include <xtensa/tie/xt_mul.h>
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_MUL32_HEADER */

View File

@ -0,0 +1,254 @@
/* Definitions for the xt_core TIE package */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2004 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* Do not modify. This is automatically generated.*/
#ifndef _XTENSA_xt_core_HEADER
#define _XTENSA_xt_core_HEADER
#ifdef __XTENSA__
#ifdef __XCC__
/*
* The following prototypes describe intrinsic functions
* corresponding to TIE instructions. Some TIE instructions
* may produce multiple results (designated as "out" operands
* in the iclass section) or may have operands used as both
* inputs and outputs (designated as "inout"). However, the C
* and C++ languages do not provide syntax that can express
* the in/out/inout constraints of TIE intrinsics.
* Nevertheless, the compiler understands these constraints
* and will check that the intrinsic functions are used
* correctly. To improve the readability of these prototypes,
* the "out" and "inout" parameters are marked accordingly
* with comments.
*/
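/*
 * Illustrative usage sketch (an editorial addition, not generated content):
 * with the XCC compiler each XT_* intrinsic below maps onto the matching
 * core instruction, so a 32-bit rotate can be expressed with the fused
 * XT_SSAI_SRC intrinsic defined further down.  Name is an assumption.
 */
#if 0
static inline unsigned rotate_right_8(unsigned x)
{
    /* SSAI_SRC sets SAR to 8 and funnel-shifts the 64-bit pair {x, x}
       right by SAR; with equal halves that is a rotate right by 8. */
    return (unsigned) XT_SSAI_SRC((int) x, (int) x, 8);
}
#endif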
extern void _TIE_xt_core_ILL(void);
extern void _TIE_xt_core_NOP(void);
extern void _TIE_xt_core_MEMW(void);
extern void _TIE_xt_core_EXTW(void);
extern void _TIE_xt_core_ISYNC(void);
extern void _TIE_xt_core_DSYNC(void);
extern void _TIE_xt_core_ESYNC(void);
extern void _TIE_xt_core_RSYNC(void);
extern unsigned _TIE_xt_core_RSR_176(void);
extern void _TIE_xt_core_WSR_176(unsigned art);
extern unsigned _TIE_xt_core_RSR_208(void);
extern unsigned _TIE_xt_core_uint32_loadi(const unsigned * p, immediate o);
extern void _TIE_xt_core_uint32_storei(unsigned c, unsigned * p, immediate o);
extern unsigned _TIE_xt_core_uint32_move(unsigned b);
extern int _TIE_xt_core_ADDI(int s, immediate i);
extern int _TIE_xt_core_OR(int s, int t);
extern int _TIE_xt_core_L32I(const int * p, immediate i);
extern void _TIE_xt_core_S32I(int r, int * p, immediate i);
extern unsigned char _TIE_xt_core_L8UI(const unsigned char * p, immediate i);
extern void _TIE_xt_core_S8I(signed char r, signed char * p, immediate i);
extern unsigned short _TIE_xt_core_L16UI(const unsigned short * p, immediate i);
extern short _TIE_xt_core_L16SI(const short * p, immediate i);
extern void _TIE_xt_core_S16I(short r, short * p, immediate i);
extern int _TIE_xt_core_ADDMI(int s, immediate i);
extern int _TIE_xt_core_ADD(int s, int t);
extern int _TIE_xt_core_ADDX2(int s, int t);
extern int _TIE_xt_core_ADDX4(int s, int t);
extern int _TIE_xt_core_ADDX8(int s, int t);
extern int _TIE_xt_core_SUB(int s, int t);
extern int _TIE_xt_core_SUBX2(int s, int t);
extern int _TIE_xt_core_SUBX4(int s, int t);
extern int _TIE_xt_core_SUBX8(int s, int t);
extern int _TIE_xt_core_AND(int s, int t);
extern int _TIE_xt_core_XOR(int s, int t);
extern unsigned _TIE_xt_core_EXTUI(unsigned t, immediate i, immediate o);
extern int _TIE_xt_core_MOVI(immediate i);
extern void _TIE_xt_core_MOVEQZ(int r /*inout*/, int s, int t);
extern void _TIE_xt_core_MOVNEZ(int r /*inout*/, int s, int t);
extern void _TIE_xt_core_MOVLTZ(int r /*inout*/, int s, int t);
extern void _TIE_xt_core_MOVGEZ(int r /*inout*/, int s, int t);
extern int _TIE_xt_core_NEG(int t);
extern int _TIE_xt_core_ABS(int t);
extern void _TIE_xt_core_SSR(int s);
extern void _TIE_xt_core_SSL(int s);
extern void _TIE_xt_core_SSA8L(int s);
extern void _TIE_xt_core_SSA8B(int s);
extern void _TIE_xt_core_SSAI(immediate i);
extern int _TIE_xt_core_SLL(int s);
extern int _TIE_xt_core_SRC(int s, int t);
extern unsigned _TIE_xt_core_SRL(unsigned t);
extern int _TIE_xt_core_SRA(int t);
extern int _TIE_xt_core_SLLI(int s, immediate i);
extern int _TIE_xt_core_SRAI(int t, immediate i);
extern unsigned _TIE_xt_core_SRLI(unsigned t, immediate i);
extern int _TIE_xt_core_SSAI_SRC(int src1, int src2, immediate amount);
extern int _TIE_xt_core_SSR_SRC(int src1, int src2, int amount);
extern int _TIE_xt_core_WSR_SAR_SRC(int src1, int src2, int amount);
extern int _TIE_xt_core_SSR_SRA(int src, int amount);
extern unsigned _TIE_xt_core_SSR_SRL(unsigned src, int amount);
extern int _TIE_xt_core_SSL_SLL(int src, int amount);
extern int _TIE_xt_core_RSIL(immediate t);
extern unsigned _TIE_xt_core_RSR_SAR(void);
extern void _TIE_xt_core_WSR_SAR(unsigned t);
extern void _TIE_xt_core_XSR_SAR(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_LITBASE(void);
extern void _TIE_xt_core_WSR_LITBASE(unsigned t);
extern void _TIE_xt_core_XSR_LITBASE(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_PS(void);
extern void _TIE_xt_core_WSR_PS(unsigned t);
extern void _TIE_xt_core_XSR_PS(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_EPC1(void);
extern void _TIE_xt_core_WSR_EPC1(unsigned t);
extern void _TIE_xt_core_XSR_EPC1(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_EXCSAVE1(void);
extern void _TIE_xt_core_WSR_EXCSAVE1(unsigned t);
extern void _TIE_xt_core_XSR_EXCSAVE1(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_EPC2(void);
extern void _TIE_xt_core_WSR_EPC2(unsigned t);
extern void _TIE_xt_core_XSR_EPC2(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_EXCSAVE2(void);
extern void _TIE_xt_core_WSR_EXCSAVE2(unsigned t);
extern void _TIE_xt_core_XSR_EXCSAVE2(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_EPC3(void);
extern void _TIE_xt_core_WSR_EPC3(unsigned t);
extern void _TIE_xt_core_XSR_EPC3(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_EXCSAVE3(void);
extern void _TIE_xt_core_WSR_EXCSAVE3(unsigned t);
extern void _TIE_xt_core_XSR_EXCSAVE3(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_VECBASE(void);
extern void _TIE_xt_core_WSR_VECBASE(unsigned t);
extern void _TIE_xt_core_XSR_VECBASE(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_EPS2(void);
extern void _TIE_xt_core_WSR_EPS2(unsigned t);
extern void _TIE_xt_core_XSR_EPS2(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_EPS3(void);
extern void _TIE_xt_core_WSR_EPS3(unsigned t);
extern void _TIE_xt_core_XSR_EPS3(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_EXCCAUSE(void);
extern void _TIE_xt_core_WSR_EXCCAUSE(unsigned t);
extern void _TIE_xt_core_XSR_EXCCAUSE(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_EXCVADDR(void);
extern void _TIE_xt_core_WSR_EXCVADDR(unsigned t);
extern void _TIE_xt_core_XSR_EXCVADDR(unsigned t /*inout*/);
extern unsigned _TIE_xt_core_RSR_DEPC(void);
extern void _TIE_xt_core_WSR_DEPC(unsigned t);
extern void _TIE_xt_core_XSR_DEPC(unsigned t /*inout*/);
extern int _TIE_xt_core_RSR_PRID(void);
#define XT_ILL _TIE_xt_core_ILL
#define XT_NOP _TIE_xt_core_NOP
#define XT_MEMW _TIE_xt_core_MEMW
#define XT_EXTW _TIE_xt_core_EXTW
#define XT_ISYNC _TIE_xt_core_ISYNC
#define XT_DSYNC _TIE_xt_core_DSYNC
#define XT_ESYNC _TIE_xt_core_ESYNC
#define XT_RSYNC _TIE_xt_core_RSYNC
#define XT_RSR_176 _TIE_xt_core_RSR_176
#define XT_WSR_176 _TIE_xt_core_WSR_176
#define XT_RSR_208 _TIE_xt_core_RSR_208
#define XT_uint32_loadi _TIE_xt_core_uint32_loadi
#define XT_uint32_storei _TIE_xt_core_uint32_storei
#define XT_uint32_move _TIE_xt_core_uint32_move
#define XT_ADDI _TIE_xt_core_ADDI
#define XT_OR _TIE_xt_core_OR
#define XT_L32I _TIE_xt_core_L32I
#define XT_S32I _TIE_xt_core_S32I
#define XT_L8UI _TIE_xt_core_L8UI
#define XT_S8I _TIE_xt_core_S8I
#define XT_L16UI _TIE_xt_core_L16UI
#define XT_L16SI _TIE_xt_core_L16SI
#define XT_S16I _TIE_xt_core_S16I
#define XT_ADDMI _TIE_xt_core_ADDMI
#define XT_ADD _TIE_xt_core_ADD
#define XT_ADDX2 _TIE_xt_core_ADDX2
#define XT_ADDX4 _TIE_xt_core_ADDX4
#define XT_ADDX8 _TIE_xt_core_ADDX8
#define XT_SUB _TIE_xt_core_SUB
#define XT_SUBX2 _TIE_xt_core_SUBX2
#define XT_SUBX4 _TIE_xt_core_SUBX4
#define XT_SUBX8 _TIE_xt_core_SUBX8
#define XT_AND _TIE_xt_core_AND
#define XT_XOR _TIE_xt_core_XOR
#define XT_EXTUI _TIE_xt_core_EXTUI
#define XT_MOVI _TIE_xt_core_MOVI
#define XT_MOVEQZ _TIE_xt_core_MOVEQZ
#define XT_MOVNEZ _TIE_xt_core_MOVNEZ
#define XT_MOVLTZ _TIE_xt_core_MOVLTZ
#define XT_MOVGEZ _TIE_xt_core_MOVGEZ
#define XT_NEG _TIE_xt_core_NEG
#define XT_ABS _TIE_xt_core_ABS
#define XT_SSR _TIE_xt_core_SSR
#define XT_SSL _TIE_xt_core_SSL
#define XT_SSA8L _TIE_xt_core_SSA8L
#define XT_SSA8B _TIE_xt_core_SSA8B
#define XT_SSAI _TIE_xt_core_SSAI
#define XT_SLL _TIE_xt_core_SLL
#define XT_SRC _TIE_xt_core_SRC
#define XT_SRL _TIE_xt_core_SRL
#define XT_SRA _TIE_xt_core_SRA
#define XT_SLLI _TIE_xt_core_SLLI
#define XT_SRAI _TIE_xt_core_SRAI
#define XT_SRLI _TIE_xt_core_SRLI
#define XT_SSAI_SRC _TIE_xt_core_SSAI_SRC
#define XT_SSR_SRC _TIE_xt_core_SSR_SRC
#define XT_WSR_SAR_SRC _TIE_xt_core_WSR_SAR_SRC
#define XT_SSR_SRA _TIE_xt_core_SSR_SRA
#define XT_SSR_SRL _TIE_xt_core_SSR_SRL
#define XT_SSL_SLL _TIE_xt_core_SSL_SLL
#define XT_RSIL _TIE_xt_core_RSIL
#define XT_RSR_SAR _TIE_xt_core_RSR_SAR
#define XT_WSR_SAR _TIE_xt_core_WSR_SAR
#define XT_XSR_SAR _TIE_xt_core_XSR_SAR
#define XT_RSR_LITBASE _TIE_xt_core_RSR_LITBASE
#define XT_WSR_LITBASE _TIE_xt_core_WSR_LITBASE
#define XT_XSR_LITBASE _TIE_xt_core_XSR_LITBASE
#define XT_RSR_PS _TIE_xt_core_RSR_PS
#define XT_WSR_PS _TIE_xt_core_WSR_PS
#define XT_XSR_PS _TIE_xt_core_XSR_PS
#define XT_RSR_EPC1 _TIE_xt_core_RSR_EPC1
#define XT_WSR_EPC1 _TIE_xt_core_WSR_EPC1
#define XT_XSR_EPC1 _TIE_xt_core_XSR_EPC1
#define XT_RSR_EXCSAVE1 _TIE_xt_core_RSR_EXCSAVE1
#define XT_WSR_EXCSAVE1 _TIE_xt_core_WSR_EXCSAVE1
#define XT_XSR_EXCSAVE1 _TIE_xt_core_XSR_EXCSAVE1
#define XT_RSR_EPC2 _TIE_xt_core_RSR_EPC2
#define XT_WSR_EPC2 _TIE_xt_core_WSR_EPC2
#define XT_XSR_EPC2 _TIE_xt_core_XSR_EPC2
#define XT_RSR_EXCSAVE2 _TIE_xt_core_RSR_EXCSAVE2
#define XT_WSR_EXCSAVE2 _TIE_xt_core_WSR_EXCSAVE2
#define XT_XSR_EXCSAVE2 _TIE_xt_core_XSR_EXCSAVE2
#define XT_RSR_EPC3 _TIE_xt_core_RSR_EPC3
#define XT_WSR_EPC3 _TIE_xt_core_WSR_EPC3
#define XT_XSR_EPC3 _TIE_xt_core_XSR_EPC3
#define XT_RSR_EXCSAVE3 _TIE_xt_core_RSR_EXCSAVE3
#define XT_WSR_EXCSAVE3 _TIE_xt_core_WSR_EXCSAVE3
#define XT_XSR_EXCSAVE3 _TIE_xt_core_XSR_EXCSAVE3
#define XT_RSR_VECBASE _TIE_xt_core_RSR_VECBASE
#define XT_WSR_VECBASE _TIE_xt_core_WSR_VECBASE
#define XT_XSR_VECBASE _TIE_xt_core_XSR_VECBASE
#define XT_RSR_EPS2 _TIE_xt_core_RSR_EPS2
#define XT_WSR_EPS2 _TIE_xt_core_WSR_EPS2
#define XT_XSR_EPS2 _TIE_xt_core_XSR_EPS2
#define XT_RSR_EPS3 _TIE_xt_core_RSR_EPS3
#define XT_WSR_EPS3 _TIE_xt_core_WSR_EPS3
#define XT_XSR_EPS3 _TIE_xt_core_XSR_EPS3
#define XT_RSR_EXCCAUSE _TIE_xt_core_RSR_EXCCAUSE
#define XT_WSR_EXCCAUSE _TIE_xt_core_WSR_EXCCAUSE
#define XT_XSR_EXCCAUSE _TIE_xt_core_XSR_EXCCAUSE
#define XT_RSR_EXCVADDR _TIE_xt_core_RSR_EXCVADDR
#define XT_WSR_EXCVADDR _TIE_xt_core_WSR_EXCVADDR
#define XT_XSR_EXCVADDR _TIE_xt_core_XSR_EXCVADDR
#define XT_RSR_DEPC _TIE_xt_core_RSR_DEPC
#define XT_WSR_DEPC _TIE_xt_core_WSR_DEPC
#define XT_XSR_DEPC _TIE_xt_core_XSR_DEPC
#define XT_RSR_PRID _TIE_xt_core_RSR_PRID
#endif /* __XCC__ */
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_core_HEADER */

View File

@ -0,0 +1,93 @@
/* Definitions for the xt_debug TIE package */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2004 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* Do not modify. This is automatically generated.*/
#ifndef _XTENSA_xt_debug_HEADER
#define _XTENSA_xt_debug_HEADER
#ifdef __XTENSA__
#ifdef __XCC__
#include <xtensa/tie/xt_core.h>
/*
* The following prototypes describe intrinsic functions
* corresponding to TIE instructions. Some TIE instructions
* may produce multiple results (designated as "out" operands
* in the iclass section) or may have operands used as both
* inputs and outputs (designated as "inout"). However, the C
* and C++ languages do not provide syntax that can express
* the in/out/inout constraints of TIE intrinsics.
* Nevertheless, the compiler understands these constraints
* and will check that the intrinsic functions are used
* correctly. To improve the readability of these prototypes,
* the "out" and "inout" parameters are marked accordingly
* with comments.
*/
extern void _TIE_xt_debug_BREAK(immediate imms, immediate immt);
extern void _TIE_xt_debug_BREAK_N(immediate imms);
extern unsigned _TIE_xt_debug_RSR_DBREAKA0(void);
extern void _TIE_xt_debug_WSR_DBREAKA0(unsigned art);
extern void _TIE_xt_debug_XSR_DBREAKA0(unsigned art /*inout*/);
extern unsigned _TIE_xt_debug_RSR_DBREAKC0(void);
extern void _TIE_xt_debug_WSR_DBREAKC0(unsigned art);
extern void _TIE_xt_debug_XSR_DBREAKC0(unsigned art /*inout*/);
extern unsigned _TIE_xt_debug_RSR_IBREAKA0(void);
extern void _TIE_xt_debug_WSR_IBREAKA0(unsigned art);
extern void _TIE_xt_debug_XSR_IBREAKA0(unsigned art /*inout*/);
extern unsigned _TIE_xt_debug_RSR_IBREAKENABLE(void);
extern void _TIE_xt_debug_WSR_IBREAKENABLE(unsigned art);
extern void _TIE_xt_debug_XSR_IBREAKENABLE(unsigned art /*inout*/);
extern unsigned _TIE_xt_debug_RSR_DEBUGCAUSE(void);
extern void _TIE_xt_debug_WSR_DEBUGCAUSE(unsigned art);
extern void _TIE_xt_debug_XSR_DEBUGCAUSE(unsigned art /*inout*/);
extern unsigned _TIE_xt_debug_RSR_ICOUNT(void);
extern void _TIE_xt_debug_WSR_ICOUNT(unsigned art);
extern void _TIE_xt_debug_XSR_ICOUNT(unsigned art /*inout*/);
extern unsigned _TIE_xt_debug_RSR_ICOUNTLEVEL(void);
extern void _TIE_xt_debug_WSR_ICOUNTLEVEL(unsigned art);
extern void _TIE_xt_debug_XSR_ICOUNTLEVEL(unsigned art /*inout*/);
extern unsigned _TIE_xt_debug_RSR_DDR(void);
extern void _TIE_xt_debug_WSR_DDR(unsigned art);
extern void _TIE_xt_debug_XSR_DDR(unsigned art /*inout*/);
#define XT_BREAK _TIE_xt_debug_BREAK
#define XT_BREAK_N _TIE_xt_debug_BREAK_N
#define XT_RSR_DBREAKA0 _TIE_xt_debug_RSR_DBREAKA0
#define XT_WSR_DBREAKA0 _TIE_xt_debug_WSR_DBREAKA0
#define XT_XSR_DBREAKA0 _TIE_xt_debug_XSR_DBREAKA0
#define XT_RSR_DBREAKC0 _TIE_xt_debug_RSR_DBREAKC0
#define XT_WSR_DBREAKC0 _TIE_xt_debug_WSR_DBREAKC0
#define XT_XSR_DBREAKC0 _TIE_xt_debug_XSR_DBREAKC0
#define XT_RSR_IBREAKA0 _TIE_xt_debug_RSR_IBREAKA0
#define XT_WSR_IBREAKA0 _TIE_xt_debug_WSR_IBREAKA0
#define XT_XSR_IBREAKA0 _TIE_xt_debug_XSR_IBREAKA0
#define XT_RSR_IBREAKENABLE _TIE_xt_debug_RSR_IBREAKENABLE
#define XT_WSR_IBREAKENABLE _TIE_xt_debug_WSR_IBREAKENABLE
#define XT_XSR_IBREAKENABLE _TIE_xt_debug_XSR_IBREAKENABLE
#define XT_RSR_DEBUGCAUSE _TIE_xt_debug_RSR_DEBUGCAUSE
#define XT_WSR_DEBUGCAUSE _TIE_xt_debug_WSR_DEBUGCAUSE
#define XT_XSR_DEBUGCAUSE _TIE_xt_debug_XSR_DEBUGCAUSE
#define XT_RSR_ICOUNT _TIE_xt_debug_RSR_ICOUNT
#define XT_WSR_ICOUNT _TIE_xt_debug_WSR_ICOUNT
#define XT_XSR_ICOUNT _TIE_xt_debug_XSR_ICOUNT
#define XT_RSR_ICOUNTLEVEL _TIE_xt_debug_RSR_ICOUNTLEVEL
#define XT_WSR_ICOUNTLEVEL _TIE_xt_debug_WSR_ICOUNTLEVEL
#define XT_XSR_ICOUNTLEVEL _TIE_xt_debug_XSR_ICOUNTLEVEL
#define XT_RSR_DDR _TIE_xt_debug_RSR_DDR
#define XT_WSR_DDR _TIE_xt_debug_WSR_DDR
#define XT_XSR_DDR _TIE_xt_debug_XSR_DDR
#endif /* __XCC__ */
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_debug_HEADER */

View File

@ -0,0 +1,57 @@
/* Definitions for the xt_density TIE package */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2004 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* Do not modify. This is automatically generated.*/
#ifndef _XTENSA_xt_density_HEADER
#define _XTENSA_xt_density_HEADER
#ifdef __XTENSA__
#ifdef __XCC__
#include <xtensa/tie/xt_core.h>
/*
* The following prototypes describe intrinsic functions
* corresponding to TIE instructions. Some TIE instructions
* may produce multiple results (designated as "out" operands
* in the iclass section) or may have operands used as both
* inputs and outputs (designated as "inout"). However, the C
* and C++ languages do not provide syntax that can express
* the in/out/inout constraints of TIE intrinsics.
* Nevertheless, the compiler understands these constraints
* and will check that the intrinsic functions are used
* correctly. To improve the readability of these prototypes,
* the "out" and "inout" parameters are marked accordingly
* with comments.
*/
extern void _TIE_xt_density_ILL_N(void);
extern void _TIE_xt_density_NOP_N(void);
extern int _TIE_xt_density_L32I_N(const int * p, immediate i);
extern void _TIE_xt_density_S32I_N(int t, int * p, immediate i);
extern int _TIE_xt_density_ADD_N(int s, int t);
extern int _TIE_xt_density_ADDI_N(int s, immediate i);
extern int _TIE_xt_density_MOV_N(int s);
extern int _TIE_xt_density_MOVI_N(immediate i);
#define XT_ILL_N _TIE_xt_density_ILL_N
#define XT_NOP_N _TIE_xt_density_NOP_N
#define XT_L32I_N _TIE_xt_density_L32I_N
#define XT_S32I_N _TIE_xt_density_S32I_N
#define XT_ADD_N _TIE_xt_density_ADD_N
#define XT_ADDI_N _TIE_xt_density_ADDI_N
#define XT_MOV_N _TIE_xt_density_MOV_N
#define XT_MOVI_N _TIE_xt_density_MOVI_N
#endif /* __XCC__ */
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_density_HEADER */

View File

@ -0,0 +1,46 @@
/* Definitions for the xt_exceptions TIE package */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2004 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* Do not modify. This is automatically generated.*/
#ifndef _XTENSA_xt_exceptions_HEADER
#define _XTENSA_xt_exceptions_HEADER
#ifdef __XTENSA__
#ifdef __XCC__
/*
* The following prototypes describe intrinsic functions
* corresponding to TIE instructions. Some TIE instructions
* may produce multiple results (designated as "out" operands
* in the iclass section) or may have operands used as both
* inputs and outputs (designated as "inout"). However, the C
* and C++ languages do not provide syntax that can express
* the in/out/inout constraints of TIE intrinsics.
* Nevertheless, the compiler understands these constraints
* and will check that the intrinsic functions are used
* correctly. To improve the readability of these prototypes,
* the "out" and "inout" parameters are marked accordingly
* with comments.
*/
extern void _TIE_xt_exceptions_EXCW(void);
extern void _TIE_xt_exceptions_SYSCALL(void);
extern void _TIE_xt_exceptions_SIMCALL(void);
#define XT_EXCW _TIE_xt_exceptions_EXCW
#define XT_SYSCALL _TIE_xt_exceptions_SYSCALL
#define XT_SIMCALL _TIE_xt_exceptions_SIMCALL
#endif /* __XCC__ */
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_exceptions_HEADER */

View File

@ -0,0 +1,44 @@
/* Definitions for the xt_externalregisters TIE package */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2004 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* Do not modify. This is automatically generated.*/
#ifndef _XTENSA_xt_externalregisters_HEADER
#define _XTENSA_xt_externalregisters_HEADER
#ifdef __XTENSA__
#ifdef __XCC__
/*
* The following prototypes describe intrinsic functions
* corresponding to TIE instructions. Some TIE instructions
* may produce multiple results (designated as "out" operands
* in the iclass section) or may have operands used as both
* inputs and outputs (designated as "inout"). However, the C
* and C++ languages do not provide syntax that can express
* the in/out/inout constraints of TIE intrinsics.
* Nevertheless, the compiler understands these constraints
* and will check that the intrinsic functions are used
* correctly. To improve the readability of these prototypes,
* the "out" and "inout" parameters are marked accordingly
* with comments.
*/
extern void _TIE_xt_externalregisters_RER(void);
extern void _TIE_xt_externalregisters_WER(void);
#define XT_RER _TIE_xt_externalregisters_RER
#define XT_WER _TIE_xt_externalregisters_WER
#endif /* __XCC__ */
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_externalregisters_HEADER */

View File

@ -0,0 +1,55 @@
/* Definitions for the xt_interrupt TIE package */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2004 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* Do not modify. This is automatically generated.*/
#ifndef _XTENSA_xt_interrupt_HEADER
#define _XTENSA_xt_interrupt_HEADER
#ifdef __XTENSA__
#ifdef __XCC__
#include <xtensa/tie/xt_core.h>
/*
* The following prototypes describe intrinsic functions
* corresponding to TIE instructions. Some TIE instructions
* may produce multiple results (designated as "out" operands
* in the iclass section) or may have operands used as both
* inputs and outputs (designated as "inout"). However, the C
* and C++ languages do not provide syntax that can express
* the in/out/inout constraints of TIE intrinsics.
* Nevertheless, the compiler understands these constraints
* and will check that the intrinsic functions are used
* correctly. To improve the readability of these prototypes,
* the "out" and "inout" parameters are marked accordingly
* with comments.
*/
extern void _TIE_xt_interrupt_WAITI(immediate s);
extern unsigned _TIE_xt_interrupt_RSR_INTERRUPT(void);
extern void _TIE_xt_interrupt_WSR_INTSET(unsigned art);
extern void _TIE_xt_interrupt_WSR_INTCLEAR(unsigned art);
extern unsigned _TIE_xt_interrupt_RSR_INTENABLE(void);
extern void _TIE_xt_interrupt_WSR_INTENABLE(unsigned art);
extern void _TIE_xt_interrupt_XSR_INTENABLE(unsigned art /*inout*/);
#define XT_WAITI _TIE_xt_interrupt_WAITI
#define XT_RSR_INTERRUPT _TIE_xt_interrupt_RSR_INTERRUPT
#define XT_WSR_INTSET _TIE_xt_interrupt_WSR_INTSET
#define XT_WSR_INTCLEAR _TIE_xt_interrupt_WSR_INTCLEAR
#define XT_RSR_INTENABLE _TIE_xt_interrupt_RSR_INTENABLE
#define XT_WSR_INTENABLE _TIE_xt_interrupt_WSR_INTENABLE
#define XT_XSR_INTENABLE _TIE_xt_interrupt_XSR_INTENABLE
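/*
 * Illustrative sketch (an editorial addition): disabling one interrupt line
 * by clearing its bit in INTENABLE.  The read-modify-write is not itself
 * interrupt-safe; helper and parameter names are assumptions.
 */
#if 0
static inline void mask_interrupt(unsigned intnum)
{
    unsigned enabled = XT_RSR_INTENABLE();          /* current enable mask */
    XT_WSR_INTENABLE(enabled & ~(1u << intnum));    /* clear one bit       */
    XT_RSYNC();                                     /* let the WSR settle  */
}
#endif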
#endif /* __XCC__ */
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_interrupt_HEADER */

View File

@ -0,0 +1,45 @@
/* Definitions for the xt_misc TIE package */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2004 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* Do not modify. This is automatically generated.*/
#ifndef _XTENSA_xt_misc_HEADER
#define _XTENSA_xt_misc_HEADER
#ifdef __XTENSA__
#ifdef __XCC__
#include <xtensa/tie/xt_core.h>
/*
* The following prototypes describe intrinsic functions
* corresponding to TIE instructions. Some TIE instructions
* may produce multiple results (designated as "out" operands
* in the iclass section) or may have operands used as both
* inputs and outputs (designated as "inout"). However, the C
* and C++ languages do not provide syntax that can express
* the in/out/inout constraints of TIE intrinsics.
* Nevertheless, the compiler understands these constraints
* and will check that the intrinsic functions are used
* correctly. To improve the readability of these prototypes,
* the "out" and "inout" parameters are marked accordingly
* with comments.
*/
extern int _TIE_xt_misc_NSA(int s);
extern unsigned _TIE_xt_misc_NSAU(unsigned s);
#define XT_NSA _TIE_xt_misc_NSA
#define XT_NSAU _TIE_xt_misc_NSAU
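/*
 * Illustrative sketch (an editorial addition): NSAU returns the number of
 * leading zero bits of its unsigned operand (32 for an input of 0), so it
 * yields an integer log2 in a single instruction.  Name is an assumption.
 */
#if 0
static inline unsigned ilog2(unsigned x)    /* undefined for x == 0 */
{
    return 31u - XT_NSAU(x);
}
#endif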
#endif /* __XCC__ */
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_misc_HEADER */

View File

@ -0,0 +1,61 @@
/* Definitions for the xt_mmu TIE package */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2004 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* Do not modify. This is automatically generated.*/
#ifndef _XTENSA_xt_mmu_HEADER
#define _XTENSA_xt_mmu_HEADER
#ifdef __XTENSA__
#ifdef __XCC__
#include <xtensa/tie/xt_core.h>
/*
* The following prototypes describe intrinsic functions
* corresponding to TIE instructions. Some TIE instructions
* may produce multiple results (designated as "out" operands
* in the iclass section) or may have operands used as both
* inputs and outputs (designated as "inout"). However, the C
* and C++ languages do not provide syntax that can express
* the in/out/inout constraints of TIE intrinsics.
* Nevertheless, the compiler understands these constraints
* and will check that the intrinsic functions are used
* correctly. To improve the readability of these prototypes,
* the "out" and "inout" parameters are marked accordingly
* with comments.
*/
extern void _TIE_xt_mmu_IDTLB(unsigned ars);
extern unsigned _TIE_xt_mmu_RDTLB1(unsigned ars);
extern unsigned _TIE_xt_mmu_RDTLB0(unsigned ars);
extern unsigned _TIE_xt_mmu_PDTLB(unsigned ars);
extern void _TIE_xt_mmu_WDTLB(unsigned art, unsigned ars);
extern void _TIE_xt_mmu_IITLB(unsigned ars);
extern unsigned _TIE_xt_mmu_RITLB1(unsigned ars);
extern unsigned _TIE_xt_mmu_RITLB0(unsigned ars);
extern unsigned _TIE_xt_mmu_PITLB(unsigned ars);
extern void _TIE_xt_mmu_WITLB(unsigned art, unsigned ars);
#define XT_IDTLB _TIE_xt_mmu_IDTLB
#define XT_RDTLB1 _TIE_xt_mmu_RDTLB1
#define XT_RDTLB0 _TIE_xt_mmu_RDTLB0
#define XT_PDTLB _TIE_xt_mmu_PDTLB
#define XT_WDTLB _TIE_xt_mmu_WDTLB
#define XT_IITLB _TIE_xt_mmu_IITLB
#define XT_RITLB1 _TIE_xt_mmu_RITLB1
#define XT_RITLB0 _TIE_xt_mmu_RITLB0
#define XT_PITLB _TIE_xt_mmu_PITLB
#define XT_WITLB _TIE_xt_mmu_WITLB
#endif /* __XCC__ */
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_mmu_HEADER */

View File

@ -0,0 +1,47 @@
/* Definitions for the xt_mul TIE package */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2004 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* Do not modify. This is automatically generated.*/
#ifndef _XTENSA_xt_mul_HEADER
#define _XTENSA_xt_mul_HEADER
#ifdef __XTENSA__
#ifdef __XCC__
#include <xtensa/tie/xt_core.h>
/*
* The following prototypes describe intrinsic functions
* corresponding to TIE instructions. Some TIE instructions
* may produce multiple results (designated as "out" operands
* in the iclass section) or may have operands used as both
* inputs and outputs (designated as "inout"). However, the C
* and C++ languages do not provide syntax that can express
* the in/out/inout constraints of TIE intrinsics.
* Nevertheless, the compiler understands these constraints
* and will check that the intrinsic functions are used
* correctly. To improve the readability of these prototypes,
* the "out" and "inout" parameters are marked accordingly
* with comments.
*/
extern int _TIE_xt_mul_MUL16S(short s, short t);
extern unsigned _TIE_xt_mul_MUL16U(unsigned short s, unsigned short t);
extern int _TIE_xt_mul_MULL(int s, int t);
#define XT_MUL16S _TIE_xt_mul_MUL16S
#define XT_MUL16U _TIE_xt_mul_MUL16U
#define XT_MULL _TIE_xt_mul_MULL
#endif /* __XCC__ */
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_mul_HEADER */

View File

@ -0,0 +1,53 @@
/* Definitions for the xt_timer TIE package */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2004 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* Do not modify. This is automatically generated.*/
#ifndef _XTENSA_xt_timer_HEADER
#define _XTENSA_xt_timer_HEADER
#ifdef __XTENSA__
#ifdef __XCC__
#include <xtensa/tie/xt_core.h>
/*
* The following prototypes describe intrinsic functions
* corresponding to TIE instructions. Some TIE instructions
* may produce multiple results (designated as "out" operands
* in the iclass section) or may have operands used as both
* inputs and outputs (designated as "inout"). However, the C
* and C++ languages do not provide syntax that can express
* the in/out/inout constraints of TIE intrinsics.
* Nevertheless, the compiler understands these constraints
* and will check that the intrinsic functions are used
* correctly. To improve the readability of these prototypes,
* the "out" and "inout" parameters are marked accordingly
* with comments.
*/
extern unsigned _TIE_xt_timer_RSR_CCOUNT(void);
extern void _TIE_xt_timer_WSR_CCOUNT(unsigned art);
extern void _TIE_xt_timer_XSR_CCOUNT(unsigned art /*inout*/);
extern unsigned _TIE_xt_timer_RSR_CCOMPARE0(void);
extern void _TIE_xt_timer_WSR_CCOMPARE0(unsigned art);
extern void _TIE_xt_timer_XSR_CCOMPARE0(unsigned art /*inout*/);
#define XT_RSR_CCOUNT _TIE_xt_timer_RSR_CCOUNT
#define XT_WSR_CCOUNT _TIE_xt_timer_WSR_CCOUNT
#define XT_XSR_CCOUNT _TIE_xt_timer_XSR_CCOUNT
#define XT_RSR_CCOMPARE0 _TIE_xt_timer_RSR_CCOMPARE0
#define XT_WSR_CCOMPARE0 _TIE_xt_timer_WSR_CCOMPARE0
#define XT_XSR_CCOMPARE0 _TIE_xt_timer_XSR_CCOMPARE0
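/*
 * Illustrative sketch (an editorial addition): CCOUNT increments once per
 * core clock, so a bounded busy-wait can be written directly on top of the
 * intrinsic; unsigned subtraction keeps the comparison wrap-safe.
 */
#if 0
static inline void delay_cycles(unsigned cycles)
{
    unsigned start = XT_RSR_CCOUNT();
    while ((unsigned)(XT_RSR_CCOUNT() - start) < cycles)
        ;                                   /* spin */
}
#endif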
#endif /* __XCC__ */
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_timer_HEADER */

View File

@ -0,0 +1,43 @@
/* Definitions for the xt_trace TIE package */
/*
* Customer ID=7011; Build=0x2b6f6; Copyright (c) 2004 by Tensilica Inc. ALL RIGHTS RESERVED.
* These coded instructions, statements, and computer programs are the
* copyrighted works and confidential proprietary information of Tensilica Inc.
* They may not be modified, copied, reproduced, distributed, or disclosed to
* third parties in any manner, medium, or form, in whole or in part, without
* the prior written consent of Tensilica Inc.
*/
/* Do not modify. This is automatically generated.*/
#ifndef _XTENSA_xt_trace_HEADER
#define _XTENSA_xt_trace_HEADER
#ifdef __XTENSA__
#ifdef __XCC__
#include <xtensa/tie/xt_core.h>
/*
* The following prototypes describe intrinsic functions
* corresponding to TIE instructions. Some TIE instructions
* may produce multiple results (designated as "out" operands
* in the iclass section) or may have operands used as both
* inputs and outputs (designated as "inout"). However, the C
* and C++ languages do not provide syntax that can express
* the in/out/inout constraints of TIE intrinsics.
* Nevertheless, the compiler understands these constraints
* and will check that the intrinsic functions are used
* correctly. To improve the readability of these prototypes,
* the "out" and "inout" parameters are marked accordingly
* with comments.
*/
extern void _TIE_xt_trace_WSR_MMID(unsigned art);
#define XT_WSR_MMID _TIE_xt_trace_WSR_MMID
#endif /* __XCC__ */
#endif /* __XTENSA__ */
#endif /* !_XTENSA_xt_trace_HEADER */

View File

@ -0,0 +1,165 @@
/*
* xtensa-libdb-macros.h
*/
/* $Id: //depot/rel/Boreal/Xtensa/Software/libdb/xtensa-libdb-macros.h#2 $ */
/* Copyright (c) 2004-2008 Tensilica Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef __H_LIBDB_MACROS
#define __H_LIBDB_MACROS
/*
* This header file provides macros used to construct, identify and use
* "target numbers" that are assigned to various types of Xtensa processor
* registers and states. These target numbers are used by GDB in the remote
* protocol, and are thus used by all GDB debugger agents (targets).
* They are also used in ELF debugger information sections (stabs, dwarf, etc).
*
* These macros are separated from xtensa-libdb.h because they are needed
* by certain debugger agents that do not use or have access to libdb,
* e.g. the OCD daemon, RedBoot, XMON, etc.
*
* For the time being, for compatibility with certain 3rd party debugger
* software vendors, target numbers are limited to 16 bits. It is
* conceivable that this will be extended in the future to 32 bits.
*/
#ifdef __cplusplus
extern "C" {
#endif
#ifndef uint32
#define uint32 unsigned int
#endif
#ifndef int32
#define int32 int
#endif
/*
* Macros to form register "target numbers" for various standard registers/states:
*/
#define XTENSA_DBREGN_INVALID -1 /* not a valid target number */
#define XTENSA_DBREGN_A(n) (0x0000+(n)) /* address registers a0..a15 */
#define XTENSA_DBREGN_B(n) (0x0010+(n)) /* boolean bits b0..b15 */
#define XTENSA_DBREGN_PC 0x0020 /* program counter */
/* 0x0021 RESERVED for use by Tensilica */
#define XTENSA_DBREGN_BO(n) (0x0022+(n)) /* boolean octuple-bits bo0..bo1 */
#define XTENSA_DBREGN_BQ(n) (0x0024+(n)) /* boolean quadruple-bits bq0..bq3 */
#define XTENSA_DBREGN_BD(n) (0x0028+(n)) /* boolean double-bits bd0..bd7 */
#define XTENSA_DBREGN_F(n) (0x0030+(n)) /* floating point registers f0..f15 */
#define XTENSA_DBREGN_VEC(n) (0x0040+(n)) /* Vectra vec regs v0..v15 */
#define XTENSA_DBREGN_VSEL(n) (0x0050+(n)) /* Vectra sel s0..s3 (V1) ..s7 (V2) */
#define XTENSA_DBREGN_VALIGN(n) (0x0058+(n)) /* Vectra valign regs u0..u3 */
#define XTENSA_DBREGN_VCOEFF(n) (0x005C+(n)) /* Vectra I vcoeff regs c0..c1 */
/* 0x005E..0x005F RESERVED for use by Tensilica */
#define XTENSA_DBREGN_AEP(n) (0x0060+(n)) /* HiFi2 Audio Engine regs aep0..aep7 */
#define XTENSA_DBREGN_AEQ(n) (0x0068+(n)) /* HiFi2 Audio Engine regs aeq0..aeq3 */
/* 0x006C..0x006F RESERVED for use by Tensilica */
#define XTENSA_DBREGN_DF(n) (0x0070+(n)) /* double floating point registers df0..df15 */
/* 0x0080..0x00FF RESERVED for use by Tensilica */
#define XTENSA_DBREGN_AR(n) (0x0100+(n)) /* physical address regs ar0..ar63
(note: only with window option) */
/* 0x0140..0x01FF RESERVED for use by Tensilica */
#define XTENSA_DBREGN_SREG(n) (0x0200+(n)) /* special registers 0..255 (core) */
#define XTENSA_DBREGN_BR XTENSA_DBREGN_SREG(0x04) /* all 16 boolean bits, BR */
#define XTENSA_DBREGN_MR(n) XTENSA_DBREGN_SREG(0x20+(n)) /* MAC16 registers m0..m3 */
#define XTENSA_DBREGN_UREG(n) (0x0300+(n)) /* user registers 0..255 (TIE) */
/* 0x0400..0x0FFF RESERVED for use by Tensilica */
/* 0x1000..0x1FFF user-defined regfiles */
/* 0x2000..0xEFFF other states (and regfiles) */
#define XTENSA_DBREGN_DBAGENT(n) (0xF000+(n)) /* non-processor "registers" 0..4095 for
3rd-party debugger agent defined use */
/* > 0xFFFF (32-bit) RESERVED for use by Tensilica */
/*#define XTENSA_DBREGN_CONTEXT(n) (0x02000000+((n)<<20))*/ /* add this macro's value to a target
number to identify a specific context 0..31
for context-replicated registers */
#define XTENSA_DBREGN_MASK 0xFFFF /* mask of valid target_number bits */
#define XTENSA_DBREGN_WRITE_SIDE 0x04000000 /* flag to request write half of a register
split into distinct read and write entries
with the same target number (currently only
valid in a couple of libdb API functions;
see xtensa-libdb.h for details) */
/*
* Macros to identify specific ranges of target numbers (formed above):
* NOTE: any context number (or other upper 12 bits) are considered
* modifiers and are thus stripped out for identification purposes.
*/
#define XTENSA_DBREGN_IS_VALID(tn) (((tn) & ~0xFFFF) == 0) /* just tests it's 16-bit unsigned */
#define XTENSA_DBREGN_IS_A(tn) (((tn) & 0xFFF0)==0x0000) /* is a0..a15 */
#define XTENSA_DBREGN_IS_B(tn) (((tn) & 0xFFF0)==0x0010) /* is b0..b15 */
#define XTENSA_DBREGN_IS_PC(tn) (((tn) & 0xFFFF)==0x0020) /* is program counter */
#define XTENSA_DBREGN_IS_BO(tn) (((tn) & 0xFFFE)==0x0022) /* is bo0..bo1 */
#define XTENSA_DBREGN_IS_BQ(tn) (((tn) & 0xFFFC)==0x0024) /* is bq0..bq3 */
#define XTENSA_DBREGN_IS_BD(tn) (((tn) & 0xFFF8)==0x0028) /* is bd0..bd7 */
#define XTENSA_DBREGN_IS_F(tn) (((tn) & 0xFFF0)==0x0030) /* is f0..f15 */
#define XTENSA_DBREGN_IS_VEC(tn) (((tn) & 0xFFF0)==0x0040) /* is v0..v15 */
#define XTENSA_DBREGN_IS_VSEL(tn) (((tn) & 0xFFF8)==0x0050) /* is s0..s7 (s0..s3 in V1) */
#define XTENSA_DBREGN_IS_VALIGN(tn) (((tn) & 0xFFFC)==0x0058) /* is u0..u3 */
#define XTENSA_DBREGN_IS_VCOEFF(tn) (((tn) & 0xFFFE)==0x005C) /* is c0..c1 */
#define XTENSA_DBREGN_IS_AEP(tn) (((tn) & 0xFFF8)==0x0060) /* is aep0..aep7 */
#define XTENSA_DBREGN_IS_AEQ(tn) (((tn) & 0xFFFC)==0x0068) /* is aeq0..aeq3 */
#define XTENSA_DBREGN_IS_DF(tn) (((tn) & 0xFFF0)==0x0070) /* is df0..df15 */
#define XTENSA_DBREGN_IS_AR(tn) (((tn) & 0xFFC0)==0x0100) /* is ar0..ar63 */
#define XTENSA_DBREGN_IS_SREG(tn) (((tn) & 0xFF00)==0x0200) /* is special register */
#define XTENSA_DBREGN_IS_BR(tn) (((tn) & 0xFFFF)==XTENSA_DBREGN_SREG(0x04)) /* is BR */
#define XTENSA_DBREGN_IS_MR(tn) (((tn) & 0xFFFC)==XTENSA_DBREGN_SREG(0x20)) /* m0..m3 */
#define XTENSA_DBREGN_IS_UREG(tn) (((tn) & 0xFF00)==0x0300) /* is user register */
#define XTENSA_DBREGN_IS_DBAGENT(tn) (((tn) & 0xF000)==0xF000) /* is non-processor */
/*#define XTENSA_DBREGN_IS_CONTEXT(tn) (((tn) & 0x02000000) != 0)*/ /* specifies context # */
/*
* Macros to extract register index from a register "target number"
* when a specific range has been identified using one of the _IS_ macros above.
* These macros only return a useful value if the corresponding _IS_ macro returns true.
*/
#define XTENSA_DBREGN_A_INDEX(tn) ((tn) & 0x0F) /* 0..15 for a0..a15 */
#define XTENSA_DBREGN_B_INDEX(tn) ((tn) & 0x0F) /* 0..15 for b0..b15 */
#define XTENSA_DBREGN_BO_INDEX(tn) ((tn) & 0x01) /* 0..1 for bo0..bo1 */
#define XTENSA_DBREGN_BQ_INDEX(tn) ((tn) & 0x03) /* 0..3 for bq0..bq3 */
#define XTENSA_DBREGN_BD_INDEX(tn) ((tn) & 0x07) /* 0..7 for bd0..bd7 */
#define XTENSA_DBREGN_F_INDEX(tn) ((tn) & 0x0F) /* 0..15 for f0..f15 */
#define XTENSA_DBREGN_VEC_INDEX(tn) ((tn) & 0x0F) /* 0..15 for v0..v15 */
#define XTENSA_DBREGN_VSEL_INDEX(tn) ((tn) & 0x07) /* 0..7 for s0..s7 */
#define XTENSA_DBREGN_VALIGN_INDEX(tn) ((tn) & 0x03) /* 0..3 for u0..u3 */
#define XTENSA_DBREGN_VCOEFF_INDEX(tn) ((tn) & 0x01) /* 0..1 for c0..c1 */
#define XTENSA_DBREGN_AEP_INDEX(tn) ((tn) & 0x07) /* 0..7 for aep0..aep7 */
#define XTENSA_DBREGN_AEQ_INDEX(tn) ((tn) & 0x03) /* 0..3 for aeq0..aeq3 */
#define XTENSA_DBREGN_DF_INDEX(tn) ((tn) & 0x0F) /* 0..15 for df0..df15 */
#define XTENSA_DBREGN_AR_INDEX(tn) ((tn) & 0x3F) /* 0..63 for ar0..ar63 */
#define XTENSA_DBREGN_SREG_INDEX(tn) ((tn) & 0xFF) /* 0..255 for special registers */
#define XTENSA_DBREGN_MR_INDEX(tn) ((tn) & 0x03) /* 0..3 for m0..m3 */
#define XTENSA_DBREGN_UREG_INDEX(tn) ((tn) & 0xFF) /* 0..255 for user registers */
#define XTENSA_DBREGN_DBAGENT_INDEX(tn) ((tn) & 0xFFF) /* 0..4095 for non-processor */
/*#define XTENSA_DBREGN_CONTEXT_INDEX(tn) (((tn) >> 20) & 0x1F)*/ /* 0..31 context numbers */
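/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): decoding a target number received from a debugger agent with
 * the _IS_ and _INDEX macros above.  The function name and use of printf
 * are assumptions.
 */
#if 0
#include <stdio.h>
static void describe_target_number(int tn)
{
    if (XTENSA_DBREGN_IS_A(tn))
        printf("address register a%d\n", XTENSA_DBREGN_A_INDEX(tn));
    else if (XTENSA_DBREGN_IS_SREG(tn))
        printf("special register %d\n", XTENSA_DBREGN_SREG_INDEX(tn));
    else if (XTENSA_DBREGN_IS_UREG(tn))
        printf("user register %d\n", XTENSA_DBREGN_UREG_INDEX(tn));
    else
        printf("other target number 0x%x\n", tn);
}
/* e.g. describe_target_number(XTENSA_DBREGN_A(3)) prints "address register a3" */
#endif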
#ifdef __cplusplus
}
#endif
#endif /* __H_LIBDB_MACROS */

View File

@ -0,0 +1,149 @@
/* xer-constants.h -- various constants describing external registers accessed
via WER and RER.
TODO: find a better prefix. Also conditionalize certain constants based
on number of cores and interrupts actually present.
*/
/*
* Copyright (c) 1999-2008 Tensilica Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <xtensa/config/core.h>
#define NUM_INTERRUPTS 27
#define NUM_CORES 4
/* Routing of NMI (BInterrupt2) and interrupts 0..n-1 (BInterrupt3+)
RER reads
WER writes
*/
#define XER_MIROUT 0x0000
#define XER_MIROUT_LAST (XER_MIROUT + NUM_INTERRUPTS)
/* IPI to core M (all 16 causes).
RER reads
WER clears
*/
#define XER_MIPICAUSE 0x0100
#define XER_MIPICAUSE_FIELD_A_FIRST 0x0
#define XER_MIPICAUSE_FIELD_A_LAST 0x0
#define XER_MIPICAUSE_FIELD_B_FIRST 0x1
#define XER_MIPICAUSE_FIELD_B_LAST 0x3
#define XER_MIPICAUSE_FIELD_C_FIRST 0x4
#define XER_MIPICAUSE_FIELD_C_LAST 0x7
#define XER_MIPICAUSE_FIELD_D_FIRST 0x8
#define XER_MIPICAUSE_FIELD_D_LAST 0xF
/* IPI from cause bit 0..15
RER invalid
WER sets
*/
#define XER_MIPISET 0x0140
#define XER_MIPISET_LAST 0x014F
/* Global enable
RER read
WER clear
*/
#define XER_MIENG 0x0180
/* Global enable
RER invalid
WER set
*/
#define XER_MIENG_SET 0x0184
/* Global assert
RER read
WER clear
*/
#define XER_MIASG 0x0188
/* Global enable
RER invalid
WER set
*/
#define XER_MIASG_SET 0x018C
/* IPI partition register
RER read
WER write
*/
#define XER_PART 0x0190
#define XER_IPI0 0x0
#define XER_IPI1 0x1
#define XER_IPI2 0x2
#define XER_IPI3 0x3
#define XER_PART_ROUTE_IPI(NUM, FIELD) ((NUM) << ((FIELD) << 2))
#define XER_PART_ROUTE_IPI_CAUSE(TO_A, TO_B, TO_C, TO_D) \
(XER_PART_ROUTE_IPI(TO_A, XER_IPI0) | \
XER_PART_ROUTE_IPI(TO_B, XER_IPI1) | \
XER_PART_ROUTE_IPI(TO_C, XER_IPI2) | \
XER_PART_ROUTE_IPI(TO_D, XER_IPI3))
#define XER_IPI_WAKE_EXT_INTERRUPT XCHAL_EXTINT0_NUM
#define XER_IPI_WAKE_CAUSE XER_MIPICAUSE_FIELD_C_FIRST
#define XER_IPI_WAKE_ADDRESS (XER_MIPISET + XER_IPI_WAKE_CAUSE)
#define XER_DEFAULT_IPI_ROUTING XER_PART_ROUTE_IPI_CAUSE(XER_IPI1, XER_IPI0, XER_IPI2, XER_IPI3)
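/* Worked example (editorial addition): XER_PART_ROUTE_IPI(NUM, FIELD) places
   NUM in the 4-bit field selected by FIELD (shift = FIELD * 4), so
       XER_DEFAULT_IPI_ROUTING
         = (XER_IPI1 << 0) | (XER_IPI0 << 4) | (XER_IPI2 << 8) | (XER_IPI3 << 12)
         = 0x3201
   which swaps the first two IPI assignments and leaves the last two at their
   identity values. */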
/* System configuration ID
RER read
WER invalid
*/
#define XER_SYSCFGID 0x01A0
/* RunStall to slave processors
RER read
WER write
*/
#define XER_MPSCORE 0x0200
/* Cache coherency ON
RER read
WER write
*/
#define XER_CCON 0x0220

View File

@ -0,0 +1,160 @@
/* xtruntime-frames.h - exception stack frames for single-threaded run-time */
/* $Id: //depot/rel/Boreal/Xtensa/OS/include/xtensa/xtruntime-frames.h#2 $ */
/*
* Copyright (c) 2002-2007 Tensilica Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _XTRUNTIME_FRAMES_H_
#define _XTRUNTIME_FRAMES_H_
#include <xtensa/config/core.h>
/* Macros that help define structures for both C and assembler: */
#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__)
#define STRUCT_BEGIN .pushsection .text; .struct 0
#define STRUCT_FIELD(ctype,size,pre,name) pre##name: .space size
#define STRUCT_AFIELD(ctype,size,pre,name,n) pre##name: .space (size)*(n)
#define STRUCT_END(sname) sname##Size:; .popsection
#else /*_ASMLANGUAGE||__ASSEMBLER__*/
#define STRUCT_BEGIN typedef struct {
#define STRUCT_FIELD(ctype,size,pre,name) ctype name;
#define STRUCT_AFIELD(ctype,size,pre,name,n) ctype name[n];
#define STRUCT_END(sname) } sname;
#endif /*_ASMLANGUAGE||__ASSEMBLER__*/
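/* Illustrative example (not from the original header): a block such as
 *
 *     STRUCT_BEGIN
 *     STRUCT_FIELD (long,4,XX_,pc)
 *     STRUCT_FIELD (long,4,XX_,ps)
 *     STRUCT_END(XxFrame)
 *
 * expands in C to
 *     typedef struct { long pc; long ps; } XxFrame;
 * and under the assembler to the symbolic offsets XX_pc = 0 and XX_ps = 4
 * plus the size symbol XxFrameSize = 8, so C and assembly code share one
 * description of each frame layout.
 */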
/*
* Kernel vector mode exception stack frame.
*
* NOTE: due to the limited range of addi used in the current
* kernel exception vector, and the fact that historically
* the vector is limited to 12 bytes, the size of this
* stack frame is limited to 128 bytes (currently at 64).
*/
STRUCT_BEGIN
STRUCT_FIELD (long,4,KEXC_,pc) /* "parm" */
STRUCT_FIELD (long,4,KEXC_,ps)
STRUCT_AFIELD(long,4,KEXC_,areg, 4) /* a12 .. a15 */
STRUCT_FIELD (long,4,KEXC_,sar) /* "save" */
#if XCHAL_HAVE_LOOPS
STRUCT_FIELD (long,4,KEXC_,lcount)
STRUCT_FIELD (long,4,KEXC_,lbeg)
STRUCT_FIELD (long,4,KEXC_,lend)
#endif
#if XCHAL_HAVE_MAC16
STRUCT_FIELD (long,4,KEXC_,acclo)
STRUCT_FIELD (long,4,KEXC_,acchi)
STRUCT_AFIELD(long,4,KEXC_,mr, 4)
#endif
STRUCT_END(KernelFrame)
/*
* User vector mode exception stack frame:
*
* WARNING: if you modify this structure, you MUST modify the
* computation of the pad size (ALIGNPAD) accordingly.
*/
STRUCT_BEGIN
STRUCT_FIELD (long,4,UEXC_,pc)
STRUCT_FIELD (long,4,UEXC_,ps)
STRUCT_FIELD (long,4,UEXC_,sar)
STRUCT_FIELD (long,4,UEXC_,vpri)
#ifdef __XTENSA_CALL0_ABI__
STRUCT_FIELD (long,4,UEXC_,a0)
#endif
STRUCT_FIELD (long,4,UEXC_,a2)
STRUCT_FIELD (long,4,UEXC_,a3)
STRUCT_FIELD (long,4,UEXC_,a4)
STRUCT_FIELD (long,4,UEXC_,a5)
#ifdef __XTENSA_CALL0_ABI__
STRUCT_FIELD (long,4,UEXC_,a6)
STRUCT_FIELD (long,4,UEXC_,a7)
STRUCT_FIELD (long,4,UEXC_,a8)
STRUCT_FIELD (long,4,UEXC_,a9)
STRUCT_FIELD (long,4,UEXC_,a10)
STRUCT_FIELD (long,4,UEXC_,a11)
STRUCT_FIELD (long,4,UEXC_,a12)
STRUCT_FIELD (long,4,UEXC_,a13)
STRUCT_FIELD (long,4,UEXC_,a14)
STRUCT_FIELD (long,4,UEXC_,a15)
#endif
STRUCT_FIELD (long,4,UEXC_,exccause) /* NOTE: could probably get rid of this one (pass it directly) */
#if XCHAL_HAVE_LOOPS
STRUCT_FIELD (long,4,UEXC_,lcount)
STRUCT_FIELD (long,4,UEXC_,lbeg)
STRUCT_FIELD (long,4,UEXC_,lend)
#endif
#if XCHAL_HAVE_MAC16
STRUCT_FIELD (long,4,UEXC_,acclo)
STRUCT_FIELD (long,4,UEXC_,acchi)
STRUCT_AFIELD(long,4,UEXC_,mr, 4)
#endif
/* ALIGNPAD is the 16-byte alignment padding. */
#ifdef __XTENSA_CALL0_ABI__
# define CALL0_ABI 1
#else
# define CALL0_ABI 0
#endif
#define ALIGNPAD ((3 + XCHAL_HAVE_LOOPS*1 + XCHAL_HAVE_MAC16*2 + CALL0_ABI*1) & 3)
#if ALIGNPAD
STRUCT_AFIELD(long,4,UEXC_,pad, ALIGNPAD) /* 16-byte alignment padding */
#endif
/*STRUCT_AFIELD(char,1,UEXC_,ureg, (XCHAL_CPEXTRA_SA_SIZE_TOR2+3)&-4)*/ /* not used, and doesn't take alignment into account */
STRUCT_END(UserFrame)
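/* Worked example (illustrative): with only XCHAL_HAVE_LOOPS configured and
 * the windowed ABI, ALIGNPAD = (3 + 1) & 3 = 0 and the 12 fixed words already
 * form a 48-byte, 16-byte-aligned frame; with loops, MAC16 and the Call0 ABI
 * all configured, ALIGNPAD = (3 + 1 + 2 + 1) & 3 = 3, padding the 29 fixed
 * words up to 32 words (128 bytes).
 */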
#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__)
/* Check for UserFrameSize small enough not to require rounding...: */
/* Skip 16-byte save area, then 32-byte space for 8 regs of call12
* (which overlaps with 16-byte GCC nested func chaining area),
* then exception stack frame: */
.set UserFrameTotalSize, 16+32+UserFrameSize
/* Greater than 112 bytes? (max range of ADDI, both signs, when aligned to 16 bytes): */
.ifgt UserFrameTotalSize-112
/* Round up to 256-byte multiple to accelerate immediate adds: */
.set UserFrameTotalSize, ((UserFrameTotalSize+255) & 0xFFFFFF00)
.endif
# define ESF_TOTALSIZE UserFrameTotalSize
#endif /* _ASMLANGUAGE || __ASSEMBLER__ */
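/* Worked example (illustrative): with UserFrameSize = 48, the total is
 * 16 + 32 + 48 = 96 <= 112, so ESF_TOTALSIZE stays at 96 and fits the ADDI
 * immediate range directly; with UserFrameSize = 128, the total 176 exceeds
 * 112 and is rounded up to 256 so stack adjustments can still use fast
 * immediate adds.
 */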
#if XCHAL_NUM_CONTEXTS > 1
/* Structure of info stored on new context's stack for setup: */
STRUCT_BEGIN
STRUCT_FIELD (long,4,INFO_,sp)
STRUCT_FIELD (long,4,INFO_,arg1)
STRUCT_FIELD (long,4,INFO_,funcpc)
STRUCT_FIELD (long,4,INFO_,prevps)
STRUCT_END(SetupInfo)
#endif
#define KERNELSTACKSIZE 1024
#endif /* _XTRUNTIME_FRAMES_H_ */
View File
@ -0,0 +1,184 @@
/*
* xtruntime.h -- general C definitions for single-threaded run-time
*
* Copyright (c) 2002-2008 Tensilica Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef XTRUNTIME_H
#define XTRUNTIME_H
#include <xtensa/config/core.h>
#include <xtensa/config/specreg.h>
#ifndef XTSTR
#define _XTSTR(x) # x
#define XTSTR(x) _XTSTR(x)
#endif
#define _xtos_set_execption_handler _xtos_set_exception_handler /* backward compatibility */
#define _xtos_set_saved_intenable _xtos_ints_on /* backward compatibility */
#define _xtos_clear_saved_intenable _xtos_ints_off /* backward compatibility */
#if !defined(_ASMLANGUAGE) && !defined(__ASSEMBLER__)
#ifdef __cplusplus
extern "C" {
#endif
/*typedef void (_xtos_timerdelta_func)(int);*/
#ifdef __cplusplus
typedef void (_xtos_handler_func)(...);
#else
typedef void (_xtos_handler_func)();
#endif
typedef _xtos_handler_func *_xtos_handler;
/*
* unsigned XTOS_SET_INTLEVEL(int intlevel);
* This macro sets the current interrupt level.
* The 'intlevel' parameter must be a constant.
* This macro returns a 32-bit value that must be passed to
* XTOS_RESTORE_INTLEVEL() to restore the previous interrupt level.
* XTOS_RESTORE_JUST_INTLEVEL() also does this, but in XEA2 configs
* it restores only PS.INTLEVEL rather than the entire PS register
* and thus is slower.
*/
#if !XCHAL_HAVE_INTERRUPTS
# define XTOS_SET_INTLEVEL(intlevel) 0
# define XTOS_SET_MIN_INTLEVEL(intlevel) 0
# define XTOS_RESTORE_INTLEVEL(restoreval)
# define XTOS_RESTORE_JUST_INTLEVEL(restoreval)
#elif XCHAL_HAVE_XEA2
/* In XEA2, we can simply and safely set PS.INTLEVEL directly: */
/* NOTE: these asm macros don't modify memory, but they are marked
* as such to act as memory access barriers to the compiler because
* these macros are sometimes used to delineate critical sections;
* function calls are natural barriers (the compiler does not know
* whether a function modifies memory) unless declared to be inlined. */
# define XTOS_SET_INTLEVEL(intlevel) ({ unsigned __tmp; \
__asm__ __volatile__( "rsil %0, " XTSTR(intlevel) "\n" \
: "=a" (__tmp) : : "memory" ); \
__tmp;})
# define XTOS_SET_MIN_INTLEVEL(intlevel) ({ unsigned __tmp, __tmp2, __tmp3; \
__asm__ __volatile__( "rsr %0, " XTSTR(PS) "\n" /* get old (current) PS.INTLEVEL */ \
"movi %2, " XTSTR(intlevel) "\n" \
"extui %1, %0, 0, 4\n" /* keep only INTLEVEL bits of parameter */ \
"blt %2, %1, 1f\n" \
"rsil %0, " XTSTR(intlevel) "\n" \
"1:\n" \
: "=a" (__tmp), "=&a" (__tmp2), "=&a" (__tmp3) : : "memory" ); \
__tmp;})
# define XTOS_RESTORE_INTLEVEL(restoreval) do{ unsigned __tmp = (restoreval); \
__asm__ __volatile__( "wsr %0, " XTSTR(PS) " ; rsync\n" \
: : "a" (__tmp) : "memory" ); \
}while(0)
# define XTOS_RESTORE_JUST_INTLEVEL(restoreval) _xtos_set_intlevel(restoreval)
#else
/* In XEA1, we have to rely on INTENABLE register virtualization: */
extern unsigned _xtos_set_vpri( unsigned vpri );
extern unsigned _xtos_vpri_enabled; /* current virtual priority */
# define XTOS_SET_INTLEVEL(intlevel) _xtos_set_vpri(~XCHAL_INTLEVEL_ANDBELOW_MASK(intlevel))
# define XTOS_SET_MIN_INTLEVEL(intlevel) _xtos_set_vpri(_xtos_vpri_enabled & ~XCHAL_INTLEVEL_ANDBELOW_MASK(intlevel))
# define XTOS_RESTORE_INTLEVEL(restoreval) _xtos_set_vpri(restoreval)
# define XTOS_RESTORE_JUST_INTLEVEL(restoreval) _xtos_set_vpri(restoreval)
#endif
/*
* The following macros build upon the above. They are generally used
* instead of invoking the SET_INTLEVEL and SET_MIN_INTLEVEL macros directly.
* They all return a value that can be used with XTOS_RESTORE_INTLEVEL()
* or _xtos_restore_intlevel() or _xtos_restore_just_intlevel() to restore
* the effective interrupt level to what it was before the macro was invoked.
* In XEA2, the DISABLE macros are much faster than the MASK macros
* (in all configs, DISABLE sets the effective interrupt level, whereas MASK
 * ensures the effective interrupt level is at least the given level
* without lowering it; in XEA2 with INTENABLE virtualization, these macros
* affect PS.INTLEVEL only, not the virtual priority, so DISABLE has partial
* MASK semantics).
*
* A typical critical section sequence might be:
* unsigned rval = XTOS_DISABLE_EXCM_INTERRUPTS;
* ... critical section ...
* XTOS_RESTORE_INTLEVEL(rval);
*/
/* Enable all interrupts (those activated with _xtos_ints_on()): */
#define XTOS_ENABLE_INTERRUPTS XTOS_SET_INTLEVEL(0)
/* Disable low-priority-level interrupts (they can interact with the OS): */
#define XTOS_DISABLE_LOWPRI_INTERRUPTS XTOS_SET_INTLEVEL(XCHAL_NUM_LOWPRI_LEVELS)
#define XTOS_MASK_LOWPRI_INTERRUPTS XTOS_SET_MIN_INTLEVEL(XCHAL_NUM_LOWPRI_LEVELS)
/* Disable interrupts that can interact with the OS: */
#define XTOS_DISABLE_EXCM_INTERRUPTS XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL)
#define XTOS_MASK_EXCM_INTERRUPTS XTOS_SET_MIN_INTLEVEL(XCHAL_EXCM_LEVEL)
#if 0 /* XTOS_LOCK_LEVEL is not exported to applications */
/* Disable interrupts that can interact with the OS, or manipulate virtual INTENABLE: */
#define XTOS_DISABLE_LOCK_INTERRUPTS XTOS_SET_INTLEVEL(XTOS_LOCK_LEVEL)
#define XTOS_MASK_LOCK_INTERRUPTS XTOS_SET_MIN_INTLEVEL(XTOS_LOCK_LEVEL)
#endif
/* Disable ALL interrupts (not for common use, particularly if one's processor
* configuration has high-level interrupts and one cares about their latency): */
#define XTOS_DISABLE_ALL_INTERRUPTS XTOS_SET_INTLEVEL(15)
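/* A minimal compilable sketch of the critical-section sequence described
 * above (illustrative only; shared_counter and protect_counter() are
 * hypothetical names, not part of this header): */
#if 0 /* illustrative only */
static volatile int shared_counter;

static void protect_counter(void)
{
    unsigned rval = XTOS_DISABLE_EXCM_INTERRUPTS;   /* block OS-level interrupts */
    shared_counter++;                               /* critical section body     */
    XTOS_RESTORE_INTLEVEL(rval);                    /* restore previous level    */
}
#endif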
extern unsigned int _xtos_ints_off( unsigned int mask );
extern unsigned int _xtos_ints_on( unsigned int mask );
extern unsigned _xtos_set_intlevel( int intlevel );
extern unsigned _xtos_set_min_intlevel( int intlevel );
extern unsigned _xtos_restore_intlevel( unsigned restoreval );
extern unsigned _xtos_restore_just_intlevel( unsigned restoreval );
extern _xtos_handler _xtos_set_interrupt_handler( int n, _xtos_handler f );
extern _xtos_handler _xtos_set_interrupt_handler_arg( int n, _xtos_handler f, void *arg );
extern _xtos_handler _xtos_set_exception_handler( int n, _xtos_handler f );
extern void _xtos_memep_initrams( void );
extern void _xtos_memep_enable( int flags );
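/* Illustrative sketch (hypothetical names, not part of this header): a C
 * handler is typically installed with _xtos_set_interrupt_handler_arg() and
 * then enabled by passing the corresponding INTENABLE bit to _xtos_ints_on(). */
#if 0 /* illustrative only */
#define MY_INT_NUM  5                         /* hypothetical interrupt number */

static void my_isr(void *arg)
{
    (void)arg;                                /* acknowledge / service the device */
}

static void install_my_isr(void)
{
    _xtos_set_interrupt_handler_arg(MY_INT_NUM, (_xtos_handler)my_isr, 0);
    _xtos_ints_on(1u << MY_INT_NUM);          /* enable in (virtual) INTENABLE */
}
#endif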
/* Deprecated (but kept because they were documented): */
extern unsigned int _xtos_read_ints( void ); /* use xthal_get_interrupt() instead */
extern void _xtos_clear_ints( unsigned int mask ); /* use xthal_set_intclear() instead */
#if XCHAL_NUM_CONTEXTS > 1
extern unsigned _xtos_init_context(int context_num, int stack_size,
_xtos_handler_func *start_func, int arg1);
#endif
/* Deprecated: */
#if XCHAL_NUM_TIMERS > 0
extern void _xtos_timer_0_delta( int cycles );
#endif
#if XCHAL_NUM_TIMERS > 1
extern void _xtos_timer_1_delta( int cycles );
#endif
#if XCHAL_NUM_TIMERS > 2
extern void _xtos_timer_2_delta( int cycles );
#endif
#if XCHAL_NUM_TIMERS > 3
extern void _xtos_timer_3_delta( int cycles );
#endif
#ifdef __cplusplus
}
#endif
#endif /* !_ASMLANGUAGE && !__ASSEMBLER__ */
#endif /* XTRUNTIME_H */