diff --git a/components/esp8266/component.mk b/components/esp8266/component.mk index 77d721ab..c306fdef 100644 --- a/components/esp8266/component.mk +++ b/components/esp8266/component.mk @@ -3,7 +3,7 @@ # COMPONENT_ADD_INCLUDEDIRS += include COMPONENT_PRIV_INCLUDEDIRS := include/driver -COMPONENT_SRCDIRS := driver +COMPONENT_SRCDIRS := driver source LIBS ?= ifndef CONFIG_NO_BLOBS diff --git a/components/esp8266/source/os_cpu_a.S b/components/esp8266/source/os_cpu_a.S new file mode 100644 index 00000000..36eca079 --- /dev/null +++ b/components/esp8266/source/os_cpu_a.S @@ -0,0 +1,436 @@ +/* +********************************************************************************************************* +* Xtensa Port +* +* Target : All Xtensa configurable and Diamond preconfigured processors; windowed and call0 ABI. +* Port By : Ross Morley, Tensilica Inc. : ross@tensilica.com, ross@computer.org +* Web URL : http://www.tensilica.com +* +********************************************************************************************************* +*/ + #include + #include + #include "xtos-internal.h" + + #include "freertos/xtensa_rtos.h" + + .extern _xt_tick_divisor + .extern _xt_context_save + .extern _xt_context_restore + .extern pxCurrentTCB + .extern CCOMPARE_AddVal + +// .section .iram.text + .section .text + +/* +********************************************************************************************************** +* vPortYield +* void vPortYield(void) +* manually switch the context. +* +********************************************************************************************************** +*/ + + .globl vPortYield + .type vPortYield,@function + .align 4 + .literal_position +vPortYield: + wsr a0, EXCSAVE_1 /* preserve a0 */ + addi sp, sp, -XT_STK_FRMSZ + s32i a0, sp, XT_STK_PC + + addi a0, sp, XT_STK_FRMSZ + s32i a0, sp, XT_STK_A1 + rsr a0, PS + s32i a0, sp, XT_STK_PS + rsr a0, EXCSAVE_1 /* save interruptee's a0 */ + s32i a0, sp, XT_STK_A0 + movi a0, _xt_user_exit + s32i a0, sp, XT_STK_EXIT + + call0 _xt_int_enter + call0 vPortEnterCritical + call0 vTaskSwitchContext + call0 vPortExitCritical + call0 _xt_int_exit + + ret + +/* +********************************************************************************************************** +* _xt_int_enter +* void _xt_int_enter(void) +* +* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_ENTER function for FreeRTOS. +* Saves the rest of the interrupt context (not already saved) and stores the interrupted task's stack pointer into its TCB. +* May only be called from assembly code by the 'call0' instruction, with interrupts disabled. +* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h. +* +********************************************************************************************************** +*/ + .globl _xt_int_enter + .type _xt_int_enter,@function + .align 4 + .literal_position +_xt_int_enter: + + /* Save a12-13 in the stack frame as required by _xt_context_save. */ + s32i a12, sp, XT_STK_A12 + s32i a13, sp, XT_STK_A13 + + /* Save return address in a safe place (free a0). */ + mov a12, a0 + + /* Save the rest of the interrupted context (preserves A12-13). */ + call0 _xt_context_save + + movi a0, pxCurrentTCB + l32i a0, a0, 0 + s32i sp, a0, 0 + + /* Retrieve the return address and return to interrupt handler. 
*/ + mov a0, a12 + ret + +/* +********************************************************************************************************** +* _xt_int_exit +* void _xt_int_exit(void) +* +* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_EXIT function for FreeRTOS. +* Switches to the stack of the current (possibly new) task, restores that task's +* context, and returns to the exit dispatcher saved in the task's stack frame at XT_STK_EXIT. +* May only be called from assembly code by the 'call0' instruction. Does not return to caller. +* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h. +* +********************************************************************************************************** +*/ + .globl _xt_int_exit + .type _xt_int_exit,@function + .align 4 +_xt_int_exit: + /* + Call0 ABI callee-saved regs a12-15 need to be saved before possible preemption. + However a12-13 were already saved by _xt_int_enter(), so only a14-15 are saved here. + */ + #ifdef __XTENSA_CALL0_ABI__ + s32i a14, sp, XT_STK_A14 + s32i a15, sp, XT_STK_A15 + #endif + + /* + Load the stack pointer saved in the current (possibly new) task's TCB. + */ + movi sp, pxCurrentTCB + l32i sp, sp, 0 + l32i sp, sp, 0 + + /* + Write the task's pre-interrupt stack pointer (sp + XT_STK_FRMSZ) back + into the first word of the TCB before the frame is unwound. + */ + movi a14, pxCurrentTCB + l32i a14, a14, 0 + addi a15, sp, XT_STK_FRMSZ + s32i a15, a14, 0 + + /* Restore full context from interrupt stack frame and return to exit dispatcher. */ + call0 _xt_context_restore + + /* In Call0 ABI, restore callee-saved regs (A12, A13 already restored). */ + #ifdef __XTENSA_CALL0_ABI__ + l32i a14, sp, XT_STK_A14 + l32i a15, sp, XT_STK_A15 + #endif + + #if XCHAL_CP_NUM > 0 + /* Ensure wsr.CPENABLE has completed. */ + rsync + #endif + + /* + Must return via the exit dispatcher corresponding to the entrypoint from which + this was called. Interruptee's A0, A1, PS, PC are restored and the interrupt + stack frame is deallocated in the exit dispatcher. + */ + /*rsil a0, 0 */ /* reenable ints above level 1 */ + /*rsync*/ + + l32i a0, sp, XT_STK_EXIT + ret + +/* +********************************************************************************************************** +* _xt_timer_int +* void _xt_timer_int(void) +* +* Implements the Xtensa RTOS porting layer's XT_RTOS_TIMER_INT function for FreeRTOS. +* Called every timer interrupt. +* Manages the tick timer and calls the RTOS tick handler every tick. +* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h. +* +* Callable from C (obeys ABI conventions). Implemented in assembly code for performance. +* +********************************************************************************************************** +*/ + /* Define local variable offsets in stack frame for Call0 ABI. */ + #ifdef __XTENSA_CALL0_ABI__ + #define __xt_timer_int_a0 0 /* ENTRY()/RET() saves/restores */ + #define __xt_timer_int_a2 4 /* preserve a2 */ + #define __xt_timer_int_a3 8 /* preserve a3 */ + #endif + + .globl _xt_timer_int + .type _xt_timer_int,@function + .align 4 +_xt_timer_int: + + /* + Xtensa timers work by comparing a cycle counter with a preset value. Once the match occurs + an interrupt is generated, and the handler has to set a new cycle count into the comparator. 
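(As a worked example, with illustrative numbers only: an ESP8266 clocking CCOUNT at the default 80 MHz with a 100 Hz RTOS tick gives a divisor of 80,000,000 / 100 = 800,000 cycles, so each service sets the comparator to its previous value plus 800,000.)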
+ To avoid clock drift due to interrupt latency, the new cycle count is computed from the old, + not the time the interrupt was serviced. However if a timer interrupt is ever serviced more + than one tick late, it is necessary to process multiple ticks until the new cycle count is + in the future, otherwise the next timer interrupt would not occur until after the cycle + counter had wrapped (2^32 cycles later). + + do { + ticks++; + old_ccompare = read_ccompare_i(); + write_ccompare_i( old_ccompare + divisor ); + service one tick; + diff = read_ccount() - old_ccompare; + } while ( diff > divisor ); + */ + + ENTRY(16) + +.L_xt_timer_int_catchup: + + /* Update the timer comparator for the next tick. */ + #ifdef XT_CLOCK_FREQ + movi a2, XT_TICK_DIVISOR /* a2 = comparator increment */ + #else + movi a3, _xt_tick_divisor + l32i a2, a3, 0 /* a2 = comparator increment */ + #endif + rsr a3, XT_CCOMPARE /* a3 = old comparator value */ + add a4, a3, a2 /* a4 = new comparator value */ + wsr a4, XT_CCOMPARE /* update comp. and clear interrupt */ + esync + + #ifdef __XTENSA_CALL0_ABI__ + /* Preserve a2 and a3 across C calls. */ + s32i a2, sp, __xt_timer_int_a2 + s32i a3, sp, __xt_timer_int_a3 + #endif + + /* Call the RTOS tick handler. */ + #ifdef __XTENSA_CALL0_ABI__ + call0 xPortSysTickHandle + #else + call4 xTaskIncrementTick + #endif + + #ifdef __XTENSA_CALL0_ABI__ + /* Restore a2 and a3. */ + l32i a2, sp, __xt_timer_int_a2 + l32i a3, sp, __xt_timer_int_a3 + #endif + + /* Check if we need to process more ticks to catch up. */ + esync /* ensure comparator update complete */ + rsr a4, CCOUNT /* a4 = cycle count */ + sub a4, a4, a3 /* diff = ccount - old comparator */ + blt a2, a4, .L_xt_timer_int_catchup /* repeat while diff > divisor */ + + RET(16) + + + + #ifdef __XTENSA_CALL0_ABI__ + #define __xt_timer_int1_a0 0 /* ENTRY()/RET() saves/restores */ + #define __xt_timer_int1_a2 4 /* preserve a2 */ + #define __xt_timer_int1_a3 8 /* preserve a3 */ + #endif + + .globl _xt_timer_int1 + .type _xt_timer_int1,@function + .align 4 +_xt_timer_int1: + + ENTRY(16) + + #ifdef __XTENSA_CALL0_ABI__ + /* Preserve a2 and a3 across C calls. */ + s32i a2, sp, __xt_timer_int1_a2 + s32i a3, sp, __xt_timer_int1_a3 + #endif + + /* Switch the task context. */ + call0 vTaskSwitchContext + + #ifdef __XTENSA_CALL0_ABI__ + /* Restore a2 and a3. */ + l32i a2, sp, __xt_timer_int1_a2 + l32i a3, sp, __xt_timer_int1_a3 + #endif + + RET(16) + + + +/* +********************************************************************************************************** +* _xt_tick_timer_init +* void _xt_tick_timer_init(void) +* +* Initialize timer and timer interrupt handler (_xt_tick_divisor_init() has already been called). +* Callable from C (obeys ABI conventions on entry). +* +********************************************************************************************************** +*/ + .globl _xt_tick_timer_init + .type _xt_tick_timer_init,@function + .align 4 +_xt_tick_timer_init: + + ENTRY(16) + + /* Set up the periodic tick timer (assume enough time to complete init). */ + #ifdef XT_CLOCK_FREQ + movi a3, XT_TICK_DIVISOR + #else + movi a2, _xt_tick_divisor + l32i a3, a2, 0 + #endif + rsr a2, CCOUNT /* current cycle count */ + add a2, a2, a3 /* time of first timer interrupt */ + wsr a2, XT_CCOMPARE /* set the comparator */ + + /* Enable the timer interrupt at the device level. 
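The INTENABLE update below is made atomic by first swapping 0 into the register (xsr), so no interrupt can be taken between reading the old mask and writing the new one. In C-like pseudo-intrinsics (a sketch; xsr_intenable()/wsr_intenable() are illustrative names, not SDK functions): old = xsr_intenable(0); wsr_intenable(old | XT_TIMER_INTEN);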
*/ + movi a2, 0 /* protect critical section */ + xsr a2, INTENABLE + movi a3, XT_TIMER_INTEN + or a2, a2, a3 + wsr a2, INTENABLE /* set new INTENABLE, no need to rsync */ + + RET(16) + + + .globl _xt_update_xt_ccompare + .type _xt_update_xt_ccompare,@function + .align 4 +_xt_update_xt_ccompare: + + ENTRY(16) + + /* Advance the tick comparator by one tick period. */ + movi a3, XT_TICK_DIVISOR + rsr a2, XT_CCOMPARE /* current comparator value */ + add a2, a2, a3 /* time of next timer interrupt */ + wsr a2, XT_CCOMPARE /* set the comparator */ + esync + + RET(16) + + + .globl _xt_set_xt_ccompare_val + .type _xt_set_xt_ccompare_val,@function + .align 4 +_xt_set_xt_ccompare_val: + + ENTRY(16) + + /* Load the comparator with the value cached in CCOMPARE_AddVal. */ + movi a3, CCOMPARE_AddVal + l32i a2, a3, 0 + wsr a2, XT_CCOMPARE /* set the comparator */ + esync + + RET(16) + +/* +********************************************************************************************************** +* isr_unmask +********************************************************************************************************** +*/ + + + .globl _xt_isr_unmask + .type _xt_isr_unmask,@function + .align 4 +_xt_isr_unmask: + + ENTRY(16) + + /* Set the interrupt bits given in a2 in INTENABLE; return the previous INTENABLE in a2. */ + xtos_lock a7 + rsr a3, INTENABLE + or a5, a3, a2 + wsr a5, INTENABLE + xtos_unlock a7 + mov a2, a3 + /*movi a3, 0 */ /* protect critical section */ + /*xsr a3, INTENABLE */ + /*or a3, a2, a3 */ + /*wsr a3, INTENABLE */ /* set new INTENABLE, no need to rsync */ + + RET(16) + + + .globl _xt_isr_mask + .type _xt_isr_mask,@function + .align 4 +_xt_isr_mask: + + ENTRY(16) + + /* Clear the interrupt bits given in a2 in INTENABLE; return the previous INTENABLE in a2. */ + xtos_lock a7 + rsr a3, INTENABLE + or a5, a3, a2 + xor a5, a5, a2 + wsr a5, INTENABLE + xtos_unlock a7 + mov a2, a3 + /*movi a3, 0 */ /* protect critical section */ + /*xsr a3, INTENABLE*/ + /*and a3, a2, a3 */ + /*wsr a3, INTENABLE*/ /* set new INTENABLE, no need to rsync */ + + RET(16) + + + + + + + + + .global _xt_read_ints + .type _xt_read_ints,@function + .align 4 +_xt_read_ints: + + ENTRY(16) + + rsr a2, INTERRUPT + + RET(16) + + .global _xt_clear_ints + .type _xt_clear_ints,@function + .align 4 +_xt_clear_ints: + + ENTRY(16) + + wsr a2, INTCLEAR + + RET(16) + diff --git a/components/esp8266/source/xtensa_context.S b/components/esp8266/source/xtensa_context.S new file mode 100644 index 00000000..903c7d9f --- /dev/null +++ b/components/esp8266/source/xtensa_context.S @@ -0,0 +1,338 @@ +/******************************************************************************* +Copyright (c) 2006-2009 by Tensilica Inc. ALL RIGHTS RESERVED. +These coded instructions, statements, and computer programs are the +copyrighted works and confidential proprietary information of Tensilica Inc. +They may not be modified, copied, reproduced, distributed, or disclosed to +third parties in any manner, medium, or form, in whole or in part, without +the prior written consent of Tensilica Inc. +-------------------------------------------------------------------------------- + + XTENSA CONTEXT SAVE AND RESTORE ROUTINES + +Low-level Call0 functions for handling generic context save and restore of +registers not specifically addressed by the interrupt vectors and handlers. +Those registers (not handled by these functions) are PC, PS, A0, A1 (SP). +Except for the calls to RTOS functions, this code is generic to Xtensa. 
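As an illustration only, the XT_STK_* offsets used below describe an interrupt stack frame that, viewed as a C struct, is roughly shaped like this (the authoritative layout, field order, and total size XT_STK_FRMSZ are defined in xtensa_rtos.h, not by this sketch):

    struct xt_stk_frame {
        uint32_t exit;               /* XT_STK_EXIT: exit dispatcher address */
        uint32_t pc, ps;             /* XT_STK_PC, XT_STK_PS                 */
        uint32_t a[16];              /* XT_STK_A0 .. XT_STK_A15              */
        uint32_t sar;                /* XT_STK_SAR                           */
        uint32_t lbeg, lend, lcount; /* XT_STK_LBEG/LEND/LCOUNT, if loops    */
        /* XT_STK_TMP temporaries and the XT_STK_EXTRA save area follow */
    };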
+ +Note that in Call0 ABI, interrupt handlers are expected to preserve the callee- +save regs (A12-A15), which is always the case if the handlers are coded in C. +However A12, A13 are made available as scratch registers for interrupt dispatch +code, so are presumed saved anyway, and are always restored even in Call0 ABI. +Only A14, A15 are truly handled as callee-save regs. + +Because Xtensa is a configurable architecture, this port supports all user +generated configurations (except restrictions stated in the release notes). +This is accomplished by conditional compilation using macros and functions +defined in the Xtensa HAL (hardware adaptation layer) for your configuration. +Only the processor state included in your configuration is saved and restored, +including any processor state added by user configuration options or TIE. + +*******************************************************************************/ + +/* Warn nicely if this file gets named with a lowercase .s instead of .S: */ +#define NOERROR # +NOERROR: .error "C preprocessor needed for this file: make sure its filename\ + ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option." + + +#include "freertos/xtensa_rtos.h" + +// .section .iram.text + .section .text + +/******************************************************************************* + +_xt_context_save + + !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !! + +Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the +interrupt stack frame defined in xtensa_rtos.h. +Its counterpart is _xt_context_restore (which also restores A12, A13). + +Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame. +This function preserves A12 & A13 in order to provide the caller with 2 scratch +regs that need not be saved over the call to this function. The choice of which +2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw, +to avoid moving data more than necessary. Caller can assign regs accordingly. + +Entry Conditions: + A0 = Return address in caller. + A1 = Stack pointer of interrupted thread or handler ("interruptee"). + Original A12, A13 have already been saved in the interrupt stack frame. + Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the + point of interruption. + If windowed ABI, PS.EXCM = 1 (exceptions disabled). + +Exit conditions: + A0 = Return address in caller. + A1 = Stack pointer of interrupted thread or handler ("interruptee"). + A12, A13 as at entry (preserved). + If windowed ABI, PS.EXCM = 1 (exceptions disabled). + +*******************************************************************************/ + + .global _xt_context_save + .type _xt_context_save,@function + .align 4 +_xt_context_save: + + s32i a2, sp, XT_STK_A2 + s32i a3, sp, XT_STK_A3 + s32i a4, sp, XT_STK_A4 + s32i a5, sp, XT_STK_A5 + s32i a6, sp, XT_STK_A6 + s32i a7, sp, XT_STK_A7 + s32i a8, sp, XT_STK_A8 + s32i a9, sp, XT_STK_A9 + s32i a10, sp, XT_STK_A10 + s32i a11, sp, XT_STK_A11 + + /* + Call0 ABI callee-saved regs a12-15 do not need to be saved here. + a12-13 are the caller's responsibility so it can use them as scratch. + So only need to save a14-a15 here for Windowed ABI (not Call0). 
+ */ + #ifndef __XTENSA_CALL0_ABI__ + s32i a14, sp, XT_STK_A14 + s32i a15, sp, XT_STK_A15 + #endif + + rsr a3, SAR + s32i a3, sp, XT_STK_SAR + + #if XCHAL_HAVE_LOOPS + rsr a3, LBEG + s32i a3, sp, XT_STK_LBEG + rsr a3, LEND + s32i a3, sp, XT_STK_LEND + rsr a3, LCOUNT + s32i a3, sp, XT_STK_LCOUNT + #endif + + #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__) + mov a9, a0 /* preserve ret addr */ + #endif + + #ifndef __XTENSA_CALL0_ABI__ + /* + To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15. + Need to save a9,12,13 temporarily (in frame temps) and recover originals. + Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow + and underflow exceptions disabled (assured by PS.EXCM == 1). + */ + s32i a12, sp, XT_STK_TMP+0 /* temp. save stuff in stack frame */ + s32i a13, sp, XT_STK_TMP+4 + s32i a9, sp, XT_STK_TMP+8 + l32i a12, sp, XT_STK_A12 /* recover original a9,12,13 */ + l32i a13, sp, XT_STK_A13 + l32i a9, sp, XT_STK_A9 + addi sp, sp, XT_STK_FRMSZ /* restore the interruptee's SP */ + call0 xthal_window_spill_nw /* preserves only a4,5,8,9,12,13 */ + addi sp, sp, -XT_STK_FRMSZ + l32i a12, sp, XT_STK_TMP+0 /* recover stuff from stack frame */ + l32i a13, sp, XT_STK_TMP+4 + l32i a9, sp, XT_STK_TMP+8 + #endif + + #if XCHAL_EXTRA_SA_SIZE > 0 + /* + NOTE: Normally the xthal_save_extra_nw macro only affects address + registers a2-a5. It is theoretically possible for Xtensa processor + designers to write TIE that causes more address registers to be + affected, but it is generally unlikely. If that ever happens, + more registers need to be saved/restored around this macro invocation. + Here we assume a9,12,13 are preserved. + Future Xtensa tools releases might limit the regs that can be affected. + */ + addi a2, sp, XT_STK_EXTRA /* where to save it */ + call0 xthal_save_extra_nw /* destroys a0,2,3,4,5 */ + #endif + + #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__) + mov a0, a9 /* retrieve ret addr */ + #endif + + ret + +/******************************************************************************* + +_xt_context_restore + + !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !! + +Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0 +ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt +stack frame defined in xtensa_rtos.h . +Its counterpart is _xt_context_save (whose caller saved A12, A13). + +Caller is responsible to restore PC, PS, A0, A1 (SP). + +Entry Conditions: + A0 = Return address in caller. + A1 = Stack pointer of interrupted thread or handler ("interruptee"). + +Exit conditions: + A0 = Return address in caller. + A1 = Stack pointer of interrupted thread or handler ("interruptee"). + Other processor state except PC, PS, A0, A1 (SP), is as at the point + of interruption. + +*******************************************************************************/ + + .global _xt_context_restore + .type _xt_context_restore,@function + .align 4 +_xt_context_restore: + + #if XCHAL_EXTRA_SA_SIZE > 0 + /* + NOTE: Normally the xthal_restore_extra_nw macro only affects address + registers a2-a5. It is theoretically possible for Xtensa processor + designers to write TIE that causes more address registers to be + affected, but it is generally unlikely. If that ever happens, + more registers need to be saved/restored around this macro invocation. + Here we only assume a13 is preserved. + Future Xtensa tools releases might limit the regs that can be affected. 
*/ + mov a13, a0 /* preserve ret addr */ + addi a2, sp, XT_STK_EXTRA /* where to find it */ + call0 xthal_restore_extra_nw /* destroys a0,2,3,4,5 */ + mov a0, a13 /* retrieve ret addr */ + #endif + + #if XCHAL_HAVE_LOOPS + l32i a2, sp, XT_STK_LBEG + l32i a3, sp, XT_STK_LEND + wsr a2, LBEG + l32i a2, sp, XT_STK_LCOUNT + wsr a3, LEND + wsr a2, LCOUNT + #endif + + l32i a3, sp, XT_STK_SAR + l32i a2, sp, XT_STK_A2 + wsr a3, SAR + l32i a3, sp, XT_STK_A3 + l32i a4, sp, XT_STK_A4 + l32i a5, sp, XT_STK_A5 + l32i a6, sp, XT_STK_A6 + l32i a7, sp, XT_STK_A7 + l32i a8, sp, XT_STK_A8 + l32i a9, sp, XT_STK_A9 + l32i a10, sp, XT_STK_A10 + l32i a11, sp, XT_STK_A11 + + /* + Call0 ABI callee-saved regs a12-15 do not need to be restored here. + However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(), + so need to be restored anyway, despite being callee-saved in Call0. + */ + l32i a12, sp, XT_STK_A12 + l32i a13, sp, XT_STK_A13 + #ifndef __XTENSA_CALL0_ABI__ + l32i a14, sp, XT_STK_A14 + l32i a15, sp, XT_STK_A15 + #endif + + ret + + +/******************************************************************************* + +_xt_coproc_init + +Initializes global co-processor management data, setting all co-processors +to "unowned". Leaves CPENABLE as it found it (does NOT clear it). + +Called during initialization of the RTOS, before any threads run. + +This may be called from normal Xtensa single-threaded application code which +might use co-processors. The Xtensa run-time initialization enables all +co-processors. They must remain enabled here, else a co-processor exception +might occur outside of a thread, which the exception handler doesn't expect. + +Entry Conditions: + Xtensa single-threaded run-time environment is in effect. + No thread is yet running. + +Exit conditions: + None. + +Obeys ABI conventions per prototype: + void _xt_coproc_init(void) + +*******************************************************************************/ + +#if XCHAL_CP_NUM > 0 + + .global _xt_coproc_init + .type _xt_coproc_init,@function + .align 4 +_xt_coproc_init: + ENTRY0 + + /* Initialize thread co-processor ownerships to 0 (unowned). */ + movi a2, _xt_coproc_owner_sa /* a2 = base of owner array */ + addi a3, a2, XCHAL_CP_MAX << 2 /* a3 = top+1 of owner array */ + movi a4, 0 /* a4 = 0 (unowned) */ +1: s32i a4, a2, 0 + addi a2, a2, 4 + bltu a2, a3, 1b + + RET0 + +#endif + + +/******************************************************************************* + +_xt_coproc_release + +Releases any and all co-processors owned by a given thread. The thread is +identified by its co-processor state save area defined in xtensa_context.h. + +Must be called before a thread's co-proc save area is deleted to avoid +memory corruption when the exception handler tries to save the state. +May be called when a thread terminates or completes but does not delete +the co-proc save area, to avoid the exception handler having to save the +thread's co-proc state before another thread can use it (optimization). + +Entry Conditions: + A2 = Pointer to base of co-processor state save area. + +Exit conditions: + None. 
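In C terms, the assembly below behaves roughly like this sketch (lock()/unlock() stand in for the inline rsil/wsr PS manipulation and are not real functions):

    lock();                                          /* rsil XCHAL_EXCM_LEVEL */
    for (int i = 0; i < XCHAL_CP_MAX; ++i)
        if (_xt_coproc_owner_sa[i] == coproc_sa_base)
            _xt_coproc_owner_sa[i] = 0;              /* mark unowned */
    unlock();                                        /* restore PS */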
+ +Obeys ABI conventions per prototype: + void _xt_coproc_release(void * coproc_sa_base) + +*******************************************************************************/ + +#if XCHAL_CP_NUM > 0 + + .global _xt_coproc_release + .type _xt_coproc_release,@function + .align 4 +_xt_coproc_release: + ENTRY0 /* a2 = base of save area */ + + movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */ + addi a4, a3, XCHAL_CP_MAX << 2 /* a4 = top+1 of owner array */ + movi a5, 0 /* a5 = 0 (unowned) */ + + rsil a6, XCHAL_EXCM_LEVEL /* lock interrupts */ + +1: l32i a7, a3, 0 /* a7 = owner at a3 */ + bne a2, a7, 2f /* if (coproc_sa_base == owner) */ + s32i a5, a3, 0 /* owner = unowned */ +2: addi a3, a3, 1<<2 /* a3 = next entry in owner array */ + bltu a3, a4, 1b /* repeat until end of array */ + +3: wsr a6, PS /* restore interrupts */ + + RET0 + +#endif + + diff --git a/components/esp8266/source/xtensa_init.c b/components/esp8266/source/xtensa_init.c new file mode 100644 index 00000000..9e59599e --- /dev/null +++ b/components/esp8266/source/xtensa_init.c @@ -0,0 +1,55 @@ +/******************************************************************************* +Copyright (c) 2006-2009 by Tensilica Inc. ALL RIGHTS RESERVED. +These coded instructions, statements, and computer programs are the +copyrighted works and confidential proprietary information of Tensilica Inc. +They may not be modified, copied, reproduced, distributed, or disclosed to +third parties in any manner, medium, or form, in whole or in part, without +the prior written consent of Tensilica Inc. +-------------------------------------------------------------------------------- + + XTENSA INITIALIZATION ROUTINES CODED IN C + +This file is a place to put miscellaneous Xtensa RTOS-generic initialization +functions that are implemented in C. + +It includes and uses the Xtensa hardware abstraction layer (HAL) to deal +with config specifics. + +*******************************************************************************/ + +#ifndef XTENSA_INIT_H +#define XTENSA_INIT_H + +#ifdef XT_BOARD +#include +#endif + +#include "freertos/xtensa_rtos.h" + + +#ifdef XT_RTOS_TIMER_INT +#ifndef XT_CLOCK_FREQ + +unsigned _xt_tick_divisor = 0; /* cached number of cycles per tick */ + +/* +Compute and initialize at run-time the tick divisor (the number of +processor clock cycles in an RTOS tick, used to set the tick timer). +Called when the processor clock frequency is not known at compile-time. +*/ +void ICACHE_FLASH_ATTR +_xt_tick_divisor_init(void) +{ + #ifdef XT_BOARD + _xt_tick_divisor = xtbsp_clock_freq_hz() / XT_TICK_PER_SEC; + #else + #error "No way to obtain processor clock frequency" + #endif /* XT_BOARD */ +} + +#endif /* XT_CLOCK_FREQ */ +#endif /* XT_RTOS_TIMER_INT */ + +#endif /* XTENSA_INIT_H */ diff --git a/components/esp8266/source/xtensa_vectors.S b/components/esp8266/source/xtensa_vectors.S new file mode 100644 index 00000000..4e88b0d0 --- /dev/null +++ b/components/esp8266/source/xtensa_vectors.S @@ -0,0 +1,2238 @@ +/******************************************************************************* +Copyright (c) 2006-2008 by Tensilica Inc. ALL RIGHTS RESERVED. +These coded instructions, statements, and computer programs are the +copyrighted works and confidential proprietary information of Tensilica Inc. 
+They may not be modified, copied, reproduced, distributed, or disclosed to +third parties in any manner, medium, or form, in whole or in part, without +the prior written consent of Tensilica Inc. +-------------------------------------------------------------------------------- + + XTENSA VECTORS AND LOW LEVEL HANDLERS FOR AN RTOS + +Xtensa low level exception and interrupt vectors and handlers for an RTOS. + +Interrupt handlers and user exception handlers support interaction with +the RTOS by calling XT_RTOS_INT_ENTER and XT_RTOS_INT_EXIT before and +after user's specific interrupt handlers. These macros are defined in +xtensa_<RTOS>.h to call suitable functions in a specific RTOS. The user +may insert application-specific handlers at indicated places for each +hardware interrupt priority/level. These handlers may be coded in C and +called from the Xtensa low level handlers. Optional hooks are provided +to install a handler per level at run-time, made available by compiling +this source file with '-DXT_INTEXC_HOOKS' (useful for automated testing). + +!! This file is a template that usually needs to be modified to handle !! +!! application specific interrupts. Search USER_EDIT for helpful comments !! +!! on where to insert handlers and how to write them. !! + +Because Xtensa is a configurable architecture, this port supports all user +generated configurations (except restrictions stated in the release notes). +This is accomplished by conditional compilation using macros and functions +defined in the Xtensa HAL (hardware adaptation layer) for your configuration. +Only the relevant parts of this file will be included in your RTOS build. +For example, this file provides interrupt vector templates for all types and +all priority levels, but only the ones in your configuration are built. + +NOTES on the use of 'call0' for long jumps instead of 'j': + 1. This file should be assembled with the -mlongcalls option to xt-xcc. + 2. The -mlongcalls compiler option causes 'call0 dest' to be expanded to + a sequence 'l32r a0, dest' 'callx0 a0' which works regardless of the + distance from the call to the destination. The linker then relaxes + it back to 'call0 dest' if it determines that dest is within range. + This allows more flexibility in locating code without the performance + overhead of the 'l32r' literal data load in cases where the destination + is in range of 'call0'. There is an additional benefit in that 'call0' + has a longer range than 'j' due to the target being word-aligned, so + the 'l32r' sequence is less likely needed. + 3. The use of 'call0' with -mlongcalls requires that register a0 not be + live at the time of the call, which is always the case for a function + call but needs to be ensured if 'call0' is used as a jump in lieu of 'j'. + 4. This use of 'call0' is independent of the C function call ABI. 
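As a usage sketch of the hook mechanism mentioned above (illustrative only; per the comments in this file, entry 0 of _xt_intexc_hooks is for user exceptions, the hook receives EXCCAUSE, and returning -1 skips default handling):

    extern int (*_xt_intexc_hooks[])(int);
    static int my_exc_hook(int exccause) { return -1; } /* claim the exception */
    /* during test setup: */ _xt_intexc_hooks[0] = my_exc_hook;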
+ +*******************************************************************************/ + #include + #include + #include + #include "xtos-internal.h" + #include "freertos/xtensa_rtos.h" + +#ifdef FOR_BAOFENG + +#define _INTERRUPT_LEVEL 3 + +STRUCT_BEGIN +STRUCT_FIELD (long,4,HESF_,SAR) +STRUCT_FIELD (long,4,HESF_,WINDOWSTART) +STRUCT_FIELD (long,4,HESF_,WINDOWBASE) +STRUCT_FIELD (long,4,HESF_,EPC1) +STRUCT_FIELD (long,4,HESF_,EXCCAUSE) +STRUCT_FIELD (long,4,HESF_,EXCVADDR) +STRUCT_FIELD (long,4,HESF_,EXCSAVE1) +STRUCT_FIELD (long,4,HESF_,VPRI) /* (XEA1 only) */ +#if XCHAL_HAVE_MAC16 +STRUCT_FIELD (long,4,HESF_,ACCLO) +STRUCT_FIELD (long,4,HESF_,ACCHI) +/*STRUCT_AFIELD(long,4,HESF_,MR, 4)*/ +#endif +#if XCHAL_HAVE_LOOPS +STRUCT_FIELD (long,4,HESF_,LCOUNT) +STRUCT_FIELD (long,4,HESF_,LBEG) +STRUCT_FIELD (long,4,HESF_,LEND) +#endif +STRUCT_AFIELD(long,4,HESF_,AREG, 64) /* address registers ar0..ar63 */ +#define HESF_AR(n) HESF_AREG+((n)*4) +STRUCT_END(HighPriFrame) +#define HESF_TOTALSIZE HighPriFrameSize+32 /* 32 bytes for interrupted code's save areas under SP */ + + +#if XCHAL_HAVE_XEA1 && HAVE_XSR /* could be made true for T1040 and T1050 */ +# error "high-priority interrupt stack frame needs adjustment if HAVE_XSR is allowed with XEA1" +#endif + + +#define PRI_N_STACK_SIZE 2048 /* default to 2 kB stack for each level-N handling */ + + + // Allocate save area and stack: + // (must use .bss, not .comm, because the subsequent .set does not work otherwise) + .section .bss, "aw" + .align 16 +LABEL(_Pri_,_Stack): .space PRI_N_STACK_SIZE + HESF_TOTALSIZE + +#if HAVE_XSR + .data + .global LABEL(_Pri_,_HandlerAddress) +LABEL(_Pri_,_HandlerAddress): .space 4 +#endif + + .section .UserEnter.text, "ax" + .global call_user_start + .type call_user_start,@function + .align 4 + .literal_position +call_user_start: + + movi a2, 0x40100000 /* note: absolute symbol, not a ptr */ + wsr a2, vecbase + call0 user_start /* jump to the user entry point */ + +#if 0 +/* +Panic handler. +Should be reached by call0 (preferable) or jump only. If call0, a0 says where +from. If on simulator, display panic message and abort, else loop indefinitely. +*/ +// .section .iram.text + .section .text + .global _xt_panic + .type _xt_panic,@function + .align 4 +_xt_panic: + #ifdef XT_SIMULATOR + addi a4, a0, -3 /* point to call0 */ + movi a3, _xt_panic_message + movi a2, SYS_log_msg + simcall + movi a2, SYS_gdb_abort + simcall + #else + rsil a2, XCHAL_EXCM_LEVEL /* disable all low & med ints */ +1: j 1b /* loop infinitely */ + #endif + + .section .rodata, "a" + .align 4 +_xt_panic_message: + .string "\n*** _xt_panic() was called from 0x%08x or jumped to. ***\n" +#endif + +#ifdef XT_INTEXC_HOOKS + /* + Hooks to dynamically install handlers for exceptions and interrupts. + Allows automated regression frameworks to install handlers per test. + Consists of an array of function pointers indexed by interrupt level, + with index 0 containing the entry for user exceptions. + Initialized with all 0s, meaning no handler is installed at each level. + See comment in xtensa_rtos.h for more details. + */ + .data + .global _xt_intexc_hooks + .type _xt_intexc_hooks,@object + .align 4 +_xt_intexc_hooks: + .fill XT_INTEXC_HOOK_NUM, 4, 0 +#endif + + +/******************************************************************************* + +EXCEPTION AND LEVEL 1 INTERRUPT VECTORS AND LOW LEVEL HANDLERS +(except window exception vectors). 
+ +Each vector goes at a predetermined location according to the Xtensa +hardware configuration, which is ensured by its placement in a special +section known to the Xtensa linker support package (LSP). It performs +the minimum necessary before jumping to the handler in the .text section. + +The corresponding handler goes in the normal .text section. It sets up +the appropriate stack frame, saves a few vector-specific registers and +calls XT_RTOS_INT_ENTER to save the rest of the interrupted context +and enter the RTOS, then sets up a C environment. It then calls the +user's interrupt handler code (which may be coded in C) and finally +calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling. + +While XT_RTOS_INT_EXIT does not return directly to the interruptee, +eventually the RTOS scheduler will want to dispatch the interrupted +task or handler. The scheduler will return to the exit point that was +saved in the interrupt stack frame at XT_STK_EXIT. + +*******************************************************************************/ + +/* +-------------------------------------------------------------------------------- +Debug Exception. +-------------------------------------------------------------------------------- +*/ + +#if XCHAL_HAVE_DEBUG + + .begin literal_prefix .DebugExceptionVector + .section .DebugExceptionVector.text, "ax" + .global _DebugExceptionVector + .align 4 + .literal_position +_DebugExceptionVector: + + #ifdef XT_SIMULATOR + /* + In the simulator, let the debugger (if any) handle the debug exception, + or simply stop the simulation: + */ + wsr a2, EXCSAVE+XCHAL_DEBUGLEVEL /* save a2 where sim expects it */ + movi a2, SYS_gdb_enter_sktloop + simcall /* have ISS handle debug exc. */ + #elif 0 /* change condition to 1 to use the HAL minimal debug handler */ + wsr a3, EXCSAVE+XCHAL_DEBUGLEVEL + movi a3, xthal_debugexc_defhndlr_nw /* use default debug handler */ + jx a3 + #else + wsr a0, EXCSAVE+XCHAL_DEBUGLEVEL /* save original a0 somewhere */ + call0 user_fatal_exception_handler /* does not return */ + rfi XCHAL_DEBUGLEVEL /* make a0 point here not later */ + #endif + + .end literal_prefix + +#endif + +/* +-------------------------------------------------------------------------------- +Double Exception. +Double exceptions are not a normal occurrence. They indicate a bug of some kind. +-------------------------------------------------------------------------------- +*/ + +#ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR + + .begin literal_prefix .DoubleExceptionVector + .section .DoubleExceptionVector.text, "ax" + .global _DoubleExceptionVector + .align 4 + .literal_position +_DoubleExceptionVector: + + #if XCHAL_HAVE_DEBUG + break 1, 4 /* unhandled double exception */ + #endif + call0 user_fatal_exception_handler /* does not return */ + rfde /* make a0 point here not later */ + + .end literal_prefix + +#endif /* XCHAL_DOUBLEEXC_VECTOR_VADDR */ + +/* +-------------------------------------------------------------------------------- +Kernel Exception (including Level 1 Interrupt from kernel mode). 
+-------------------------------------------------------------------------------- +*/ + + .begin literal_prefix .KernelExceptionVector + .section .KernelExceptionVector.text, "ax" + .global _KernelExceptionVector + .align 4 + .literal_position +_KernelExceptionVector: + + wsr a0, EXCSAVE_1 /* preserve a0 */ + call0 _xt_kernel_exc /* kernel exception handler */ + /* never returns here - call0 is used as a jump (see note at top) */ + + .end literal_prefix + +// .section .iram.text + .section .text + .align 4 + .literal_position +_xt_kernel_exc: + #if XCHAL_HAVE_DEBUG + break 1, 0 /* unhandled kernel exception */ + #endif + call0 user_fatal_exception_handler /* does not return */ + rfe /* make a0 point here not there */ + + +/* +-------------------------------------------------------------------------------- +User Exception (including Level 1 Interrupt from user mode). +-------------------------------------------------------------------------------- +*/ + + .begin literal_prefix .UserExceptionVector + .section .UserExceptionVector.text, "ax" + .global _UserExceptionVector + .type _UserExceptionVector,@function + .align 4 + .literal_position +_UserExceptionVector: + + wsr a0, EXCSAVE_1 /* preserve a0 */ + call0 _xt_user_exc /* user exception handler */ + /* never returns here - call0 is used as a jump (see note at top) */ + + .end literal_prefix + +// .section .iram.text + .section .text + /* + Insert some waypoints for jumping beyond the signed 8-bit range + of conditional branch instructions, so the conditional branches + to specific-cause exception handlers are not taken in the mainline. + Saves some cycles in the mainline. + */ + + #if XCHAL_HAVE_WINDOWED + .align 4 +_xt_to_alloca_exc: + call0 _xt_alloca_exc /* in window vectors section */ + /* never returns here - call0 is used as a jump (see note at top) */ + #endif + + .align 4 + .literal_position +_xt_to_syscall_exc: + call0 _xt_syscall_exc + /* never returns here - call0 is used as a jump (see note at top) */ + + /* User exception handler begins here. */ + .type _xt_user_exc,@function + .align 4 +_xt_user_exc: + /* + Handle alloca and syscall exceptions before allocating stack frame and + interacting with RTOS. + */ + rsr a0, EXCCAUSE + #if XCHAL_HAVE_WINDOWED + beqi a0, EXCCAUSE_ALLOCA, _xt_to_alloca_exc + #endif + beqi a0, EXCCAUSE_SYSCALL, _xt_to_syscall_exc + + /* Allocate interrupt stack frame and save minimal context. */ + mov a0, sp /* sp == a1 */ + addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */ + s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */ + rsr a0, PS /* save interruptee's PS */ + s32i a0, sp, XT_STK_PS + rsr a0, EPC_1 /* save interruptee's PC */ + s32i a0, sp, XT_STK_PC + rsr a0, EXCSAVE_1 /* save interruptee's a0 */ + s32i a0, sp, XT_STK_A0 + movi a0, _xt_user_exit /* save exit point for dispatch */ + s32i a0, sp, XT_STK_EXIT + + /* + Handle co-processor exceptions after allocating stack frame and before + interacting with RTOS. + */ + #if XCHAL_CP_NUM > 0 + rsr a0, EXCCAUSE + bgeui a0, EXCCAUSE_CP0_DISABLED, _xt_to_coproc_exc +.L_xt_user_exc_not_coproc: + #endif + + /* Save rest of interrupt context and enter RTOS. */ + call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */ + + /* !! We are now on the RTOS system stack !! */ + + /* Set up PS for C, reenable hi-pri interrupts, and clear EXCM. */ + #ifdef __XTENSA_CALL0_ABI__ + movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM + #else + movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE + #endif + wsr a0, PS + rsync + + /* !! 
It is OK to call C handlers after this point. !! */ + + /* Handle exceptions per EXCCAUSE. */ + rsr a2, EXCCAUSE /* a2 = exception cause */ + beqi a2, EXCCAUSE_LEVEL1INTERRUPT, .L_xt_user_int /* level 1 interrupt */ + + #ifdef XT_INTEXC_HOOKS + /* + Call exception hook to pre-handle exceptions (if installed). + Pass EXCCAUSE in a2, and check result in a2 (if -1, skip default handling). + */ + movi a0, _xt_intexc_hooks + l32i a0, a0, 0 /* user exception hook index 0 */ + beqz a0, 1f +.Ln_xt_user_exc_call_hook: + #ifdef __XTENSA_CALL0_ABI__ + callx0 a0 + beqi a2, -1, .L_xt_user_done + #else + mov a6, a2 + callx4 a0 + beqi a6, -1, .L_xt_user_done + mov a2, a6 + #endif +1: + #endif + + /* USER_EDIT: + ADD ANY CUSTOM EXCEPTION HANDLER CODE HERE, OR CALL C HANDLER. + The exception cause is in A2. After handling, jump to .L_xt_user_int . + Note on Call0 ABI: Callee-saved regs (a12-15) have not yet been saved, + so should not be corrupted here. A C handler will not corrupt them. + */ + + /* If we get here, the exception has not been handled. */ +.Ln_xt_user_unhandled: + #if XCHAL_HAVE_DEBUG + break 1, 1 /* unhandled user exception */ + #endif + call0 user_fatal_exception_handler + + /* Handle level 1 interrupts. OK to enable med-pri interrupts now. */ +.L_xt_user_int: + rsil a0, 1 /* reenable ints above level 1 */ + + /* + Get mask of pending, enabled interrupts at this level into a2. + Comment this out if there is only one interrupt at this level. + */ + rsr a2, INTENABLE + rsr a3, INTERRUPT + movi a4, XCHAL_INTLEVEL1_MASK + and a2, a2, a3 + and a2, a2, a4 + + #ifdef XT_INTEXC_HOOKS + /* Call interrupt hook if present to (pre)handle interrupts. */ + movi a0, _xt_intexc_hooks + l32i a0, a0, 1<<2 + beqz a0, 2f +.Ln_xt_user_int_call_hook: + #ifdef __XTENSA_CALL0_ABI__ + callx0 a0 + beqz a2, .L_xt_user_done + #else + mov a6, a2 + callx4 a0 + beqz a6, .L_xt_user_done + mov a2, a6 + #endif +2: + #endif + + /* USER_EDIT: + ADD LOW PRIORITY LEVEL 1 INTERRUPT HANDLER CODE HERE, OR CALL C HANDLER. + At this point, a2 contains a mask of pending, enabled ints at this level. + Note on Call0 ABI: Callee-saved regs (a12-15) have not yet been saved, + so should not be corrupted here. A C handler will not corrupt them. + HANDLER MUST CAUSE LEVEL TRIGGERED INTERRUPT REQUESTS TO BE DEASSERTED. + When done, ensure a2 contains a mask of unhandled (still pending) + enabled ints at this level, and fall through. + */ + + #if XT_TIMER_INTPRI == 1 +.Ln_xt_user_int_timer: + movi a3, 0xFFBF + and a3, a2, a3 + bnez a3, 3f + /* Interrupt handler for the RTOS tick timer if at this level. */ + movi a3, XT_TIMER_INTEN /* timer interrupt bit */ + /*bnone a2, a3, 3f*/ + #ifdef __XTENSA_CALL0_ABI__ + sub a12, a2, a3 /* clear timer int and save mask */ + call0 XT_RTOS_TIMER_INT + mov a2, a12 /* recover mask of remaining ints */ + beqz a2, 4f + #else + call4 XT_RTOS_TIMER_INT /* a2 automatically preserved */ + sub a2, a2, a3 /* clear timer int from mask */ + #endif +3: + call0 _xt_isr_handler + bnez a2, .Ln_xt_user_int_timer + #endif +4: + /* All interrupts at this level should have been handled now. */ + beqz a2, .L_xt_user_done + + /* If we get here, we have an unhandled interrupt. */ + #if XCHAL_HAVE_DEBUG + break 1, 1 /* unhandled user exception */ + /* EXCCAUSE == 4 (level 1 int) */ + #endif + call0 user_fatal_exception_handler + + /* Done handling after XT_RTOS_INT_ENTER. Give control to RTOS. */ +.L_xt_user_done: + call0 XT_RTOS_INT_EXIT /* does not return directly here */ + + /* + Exit point for dispatch. 
Saved in interrupt stack frame at XT_STK_EXIT + on entry and used to return to a thread or interrupted interrupt handler. + */ + .global _xt_user_exit + .type _xt_user_exit,@function + .align 4 +_xt_user_exit: + l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */ + wsr a0, PS + l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */ + wsr a0, EPC_1 + l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */ + l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */ + rsync /* ensure PS and EPC written */ + rfe /* PS.EXCM is cleared */ + +/* +-------------------------------------------------------------------------------- +Syscall Exception Handler (jumped to from User Exception Handler). +Syscall 0 is required to spill the register windows (no-op in Call 0 ABI). +Only syscall 0 is handled here. Other syscalls return -1 to caller in a2. +-------------------------------------------------------------------------------- +*/ + +// .section .iram.text + .section .text + .type _xt_syscall_exc,@function + .align 4 +_xt_syscall_exc: + + #ifdef __XTENSA_CALL0_ABI__ + /* + Save minimal regs for scratch. Syscall 0 does nothing in Call0 ABI. + Use a minimal stack frame (16B) to save A2 & A3 for scratch. + PS.EXCM could be cleared here, but unlikely to improve worst-case latency. + rsr a0, PS + addi a0, a0, -PS_EXCM_MASK + wsr a0, PS + */ + addi sp, sp, -16 + s32i a2, sp, 8 + s32i a3, sp, 12 + #else /* Windowed ABI */ + /* + Save necessary context and spill the register windows. + PS.EXCM is still set and must remain set until after the spill. + Reuse context save function though it saves more than necessary. + For this reason, a full interrupt stack frame is allocated. + */ + addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */ + s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */ + s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */ + call0 _xt_context_save + #endif + + /* + Grab the interruptee's PC and skip over the 'syscall' instruction. + If it's at the end of a zero-overhead loop and it's not on the last + iteration, decrement loop counter and skip to beginning of loop. + */ + rsr a2, EPC_1 /* a2 = PC of 'syscall' */ + addi a3, a2, 3 /* ++PC */ + #if XCHAL_HAVE_LOOPS + rsr a0, LEND /* if (PC == LEND */ + bne a3, a0, 1f + rsr a0, LCOUNT /* && LCOUNT != 0) */ + beqz a0, 1f /* { */ + addi a0, a0, -1 /* --LCOUNT */ + rsr a3, LBEG /* PC = LBEG */ + wsr a0, LCOUNT /* } */ + #endif +1: wsr a3, EPC_1 /* update PC */ + + /* Restore interruptee's context and return from exception. */ + #ifdef __XTENSA_CALL0_ABI__ + l32i a2, sp, 8 + l32i a3, sp, 12 + addi sp, sp, 16 + #else + call0 _xt_context_restore + addi sp, sp, XT_STK_FRMSZ + #endif + movi a0, -1 + movnez a2, a0, a2 /* return -1 if not syscall 0 */ + rsr a0, EXCSAVE_1 + rfe + + +/* +Currently only shells for high priority interrupt handlers are provided +here. However a template and example can be found in the Tensilica tools +documentation: "Microprocessor Programmer's Guide". 
+*/ + +#if XCHAL_HAVE_NMI + + .begin literal_prefix .NMIExceptionVector + .section .NMIExceptionVector.text, "ax" + .global _NMIExceptionVector + .type _NMIExceptionVector,@function + .align 4 + .literal_position +_NMIExceptionVector: + wsr a0, EXCSAVE + XCHAL_NMILEVEL /* preserve a0 */ + call0 _xt_nmi /* load interrupt handler */ + /* never returns here - call0 is used as a jump (see note at top) */ + + .end literal_prefix + +// .section .iram.text + .section .text + .type _xt_nmi,@function + .align 4 +_xt_nmi: + + #ifdef XT_INTEXC_HOOKS + /* Call interrupt hook if present to (pre)handle interrupts. */ + movi a0, _xt_intexc_hooks + l32i a0, a0, XCHAL_NMILEVEL<<2 + beqz a0, 1f +.Ln_xt_nmi_call_hook: + callx0 a0 /* must NOT disturb stack! */ +1: + #endif + + /* USER_EDIT: + ADD HIGH PRIORITY NON-MASKABLE INTERRUPT (NMI) HANDLER CODE HERE. + */ + + movi a0, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE // get ptr to save area + // interlock + + // Save a few registers so we can do some work: + s32i a2, a0, HESF_AR(2) +#if HAVE_XSR + //movi a0, LABEL(_Level,FromVector) // this dispatcher's address + movi a2, LABEL(_Pri_,_HandlerAddress) // dispatcher address var. + s32i a1, a0, HESF_AR(1) + l32i a2, a2, 0 // get dispatcher address + s32i a3, a0, HESF_AR(3) + xsr a2, EXCSAVE_LEVEL // get saved a0, restore dispatcher address +#else + rsr a2, EXCSAVE_LEVEL // get saved a0 + s32i a1, a0, HESF_AR(1) + s32i a3, a0, HESF_AR(3) +#endif + s32i a4, a0, HESF_AR(4) + s32i a2, a0, HESF_AR(0) + + // Save/restore all exception state + // (IMPORTANT: this code assumes no general exceptions occur + // during the execution of this dispatcher until this state + // is completely saved and from the point it is restored.) + // + // Exceptions that may normally occur within the C handler + // include window exceptions (affecting EPC1), alloca exceptions + // (affecting EPC1/EXCCAUSE and its handling uses EXCSAVE1), + // and possibly others depending on the particular C handler + // (possibly needing save/restore of EXCVADDR; and EXCVADDR + // is also possibly corrupted by any access thru an auto-refill + // way on a processor with a full MMU). + // + rsr a3, EPC1 + rsr a4, EXCCAUSE + s32i a3, a0, HESF_EPC1 + s32i a4, a0, HESF_EXCCAUSE +#if !XCHAL_HAVE_XEA1 + rsr a3, EXCVADDR + s32i a3, a0, HESF_EXCVADDR +#endif + rsr a4, EXCSAVE1 + s32i a4, a0, HESF_EXCSAVE1 + +#ifdef __XTENSA_WINDOWED_ABI__ + // Save remainder of entire address register file (!): + movi a2, XCHAL_NUM_AREGS - 8 // how many saved so far +#endif + + s32i a5, a0, HESF_AR(5) + s32i a6, a0, HESF_AR(6) + s32i a7, a0, HESF_AR(7) + +1: s32i a8, a0, HESF_AR(8) + s32i a9, a0, HESF_AR(9) + s32i a10, a0, HESF_AR(10) + s32i a11, a0, HESF_AR(11) + s32i a12, a0, HESF_AR(12) + s32i a13, a0, HESF_AR(13) + s32i a14, a0, HESF_AR(14) + s32i a15, a0, HESF_AR(15) + +#ifdef __XTENSA_WINDOWED_ABI__ + addi a8, a2, -8 + addi a10, a0, 8*4 + rotw 2 + bnez a2, 1b // loop until done + + rotw 2 + // back to original a2 ... + + // Save a few other registers required for C: + rsr a3, WINDOWSTART + rsr a4, WINDOWBASE + s32i a3, a0, HESF_WINDOWSTART + s32i a4, a0, HESF_WINDOWBASE + + // Setup window registers for first caller: + movi a3, 1 + movi a4, 0 + wsr a3, WINDOWSTART + wsr a4, WINDOWBASE + rsync + + // Note: register window has rotated, ie. a0..a15 clobbered. 
+ +#endif /* __XTENSA_WINDOWED_ABI__ */ + + movi a1, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE // get ptr to save area (is also initial stack ptr) + movi a0, 0 // mark start of call frames in stack + + // Critical state saved, a bit more to do to allow window exceptions... + + // We now have a C-coherent stack and window state. + // Still have to fix PS while making sure interrupts stay disabled + // at the appropriate level (ie. level 2 and below are disabled in this case). + +#if XCHAL_HAVE_XEA1 + movi a7, _xtos_intstruct // address of interrupt management globals + rsilft a3, _INTERRUPT_LEVEL, XTOS_LOCKLEVEL // lockout + movi a4, ~INTLEVEL_N_BELOW_MASK // mask out all interrupts at this level or lower + l32i a3, a7, XTOS_VPRI_ENABLED_OFS // read previous _xtos_vpri_enabled + l32i a5, a7, XTOS_ENABLED_OFS // read _xtos_enabled + s32i a4, a7, XTOS_VPRI_ENABLED_OFS // set new _xtos_vpri_enabled (mask interrupts as if at _INTERRUPT_LEVEL) + s32i a3, a1, HESF_VPRI // save previous vpri + movi a2, 0x50020 // WOE=1, UM=1, INTLEVEL=0 + and a3, a5, a4 // mask out selected interrupts + wsr a3, INTENABLE // disable all low-priority interrupts +#else + // Load PS for C code, clear EXCM (NOTE: this step is different for XEA1): +# ifdef __XTENSA_CALL0_ABI__ + movi a2, 0x00020 + _INTERRUPT_LEVEL // WOE=0, CALLINC=0, UM=1, INTLEVEL=N, EXCM=0, RING=0 +# else + movi a2, 0x50020 + _INTERRUPT_LEVEL // WOE=1, CALLINC=1, UM=1, INTLEVEL=N, EXCM=0, RING=0 +# endif + +#endif + wsr a2, PS // update PS to enable window exceptions, etc as per above + rsync + + // Okay, window exceptions can now happen (although we have to call + // deep before any will happen because we've reset WINDOWSTART). + + // Save other state that might get clobbered by C code: + +////////////////// COMMON DISPATCH CODE BEGIN + + rsr a14, SAR + s32i a14, a1, HESF_SAR +#if XCHAL_HAVE_LOOPS + rsr a14, LCOUNT + s32i a14, a1, HESF_LCOUNT + rsr a14, LBEG + s32i a14, a1, HESF_LBEG + rsr a14, LEND + s32i a14, a1, HESF_LEND +#endif +#if XCHAL_HAVE_MAC16 + rsr a14, ACCLO + s32i a14, a1, HESF_ACCLO + rsr a14, ACCHI + s32i a14, a1, HESF_ACCHI +#endif + +#ifdef __XTENSA_CALL0_ABI__ +#ifdef FOR_BAOFENG + movi a13, NMI_Handler +#else + movi a13, wDev_ProcessFiq +#endif + callx0 a13 // call interrupt's C handler +#else + movi a13, NMI_Handler + callx4 a13 // call interrupt's C handler +#endif + + // Restore everything, and return. + + // Three temp registers are required for this code to be optimal (no interlocks) in + // T2xxx microarchitectures with 7-stage pipe; otherwise only two + // registers would be needed. 
+ // +#if XCHAL_HAVE_LOOPS + l32i a13, a1, HESF_LCOUNT + l32i a14, a1, HESF_LBEG + l32i a15, a1, HESF_LEND + wsr a13, LCOUNT + wsr a14, LBEG + wsr a15, LEND +#endif + +#if XCHAL_HAVE_MAC16 + l32i a13, a1, HESF_ACCLO + l32i a14, a1, HESF_ACCHI + wsr a13, ACCLO + wsr a14, ACCHI +#endif + l32i a15, a1, HESF_SAR + wsr a15, SAR + +////////////////// COMMON DISPATCH CODE END + +#if XCHAL_HAVE_XEA1 + // Here, a7 = address of interrupt management globals + l32i a4, a1, HESF_VPRI // restore previous vpri + rsil a3, XTOS_LOCKLEVEL // lockout + l32i a5, a7, XTOS_ENABLED_OFS // read _xtos_enabled + s32i a4, a7, XTOS_VPRI_ENABLED_OFS // set new _xtos_vpri_enabled + movi a2, 0x00020 + _INTERRUPT_LEVEL // WOE=0, UM=1, INTLEVEL=N + /* movi a2, 0x00020 + 0 // WOE=0, UM=1, INTLEVEL=N */ + and a3, a5, a4 // mask out selected interrupts + wsr a3, INTENABLE // disable all low-priority interrupts +#else + // Load PS for interrupt exit, set EXCM: + movi a2, 0x00030 + _INTERRUPT_LEVEL // WOE=0, CALLINC=0, UM=1, INTLEVEL=N, EXCM=1, RING=0 + /* movi a2, 0x00030 + 0 // WOE=0, CALLINC=0, UM=1, INTLEVEL=N, EXCM=1, RING=0 */ +#endif + wsr a2, PS // update PS to disable window exceptions, etc as per above + rsync + + // NOTE: here for XEA1, restore INTENABLE etc... + +#ifdef __XTENSA_WINDOWED_ABI__ + // Restore window registers: + l32i a2, a1, HESF_WINDOWSTART + l32i a3, a1, HESF_WINDOWBASE + wsr a2, WINDOWSTART + wsr a3, WINDOWBASE + rsync + // Note: register window has rotated, ie. a0..a15 clobbered. + + // Reload initial stack pointer: + movi a1, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE // - 16 + movi a6, XCHAL_NUM_AREGS - 8 // how many saved so far + addi a7, a1, -8*4 + + // Restore entire register file (!): + +1: + addi a14, a6, -8 + addi a15, a7, 8*4 + l32i a4, a15, HESF_AR(4) + l32i a5, a15, HESF_AR(5) + l32i a6, a15, HESF_AR(6) + l32i a7, a15, HESF_AR(7) + l32i a8, a15, HESF_AR(8) + l32i a9, a15, HESF_AR(9) + l32i a10,a15, HESF_AR(10) + l32i a11,a15, HESF_AR(11) + rotw 2 + bnez a6, 1b // loop until done + + l32i a4, a7, HESF_AR(12) + l32i a5, a7, HESF_AR(13) + l32i a6, a7, HESF_AR(14) + l32i a7, a7, HESF_AR(15) + rotw 2 + + // back to original a1 ... 
+ +#else /* Call0 ABI: */ + + l32i a4, a1, HESF_AR(4) // restore general registers + l32i a5, a1, HESF_AR(5) + l32i a6, a1, HESF_AR(6) + l32i a7, a1, HESF_AR(7) + l32i a8, a1, HESF_AR(8) + l32i a9, a1, HESF_AR(9) + l32i a10, a1, HESF_AR(10) + l32i a11, a1, HESF_AR(11) + l32i a12, a1, HESF_AR(12) + l32i a13, a1, HESF_AR(13) + l32i a14, a1, HESF_AR(14) + l32i a15, a1, HESF_AR(15) + +#endif /* __XTENSA_WINDOWED_ABI__ */ + + // Restore exception state: + l32i a2, a1, HESF_EPC1 + l32i a3, a1, HESF_EXCCAUSE + wsr a2, EPC1 + wsr a3, EXCCAUSE +#if !XCHAL_HAVE_XEA1 + l32i a2, a1, HESF_EXCVADDR + wsr a2, EXCVADDR +#endif + l32i a3, a1, HESF_EXCSAVE1 + wsr a3, EXCSAVE1 + + l32i a0, a1, HESF_AR(0) + + l32i a2, a1, HESF_AR(2) + l32i a3, a1, HESF_AR(3) + l32i a1, a1, HESF_AR(1) + + rfi XCHAL_NMILEVEL + +#if 0 + .align 4 +.L_xt_nmi_exit: + rsr a0, EXCSAVE + XCHAL_NMILEVEL /* restore a0 */ + rfi XCHAL_NMILEVEL +#endif +#endif /* NMI */ + + +#else /*********************Separate from BAOFENG and NORMAL********************/ + + +#define _INTERRUPT_LEVEL 3 + +STRUCT_BEGIN +STRUCT_FIELD (long,4,HESF_,SAR) +#ifdef __XTENSA_WINDOWED_ABI__ +STRUCT_FIELD (long,4,HESF_,WINDOWSTART) +STRUCT_FIELD (long,4,HESF_,WINDOWBASE) +#endif +STRUCT_FIELD (long,4,HESF_,EPC1) +STRUCT_FIELD (long,4,HESF_,EXCCAUSE) +STRUCT_FIELD (long,4,HESF_,EXCVADDR) +STRUCT_FIELD (long,4,HESF_,EXCSAVE1) +STRUCT_FIELD (long,4,HESF_,EPC3) +STRUCT_FIELD (long,4,HESF_,EPS3) +#if XCHAL_HAVE_XEA1 +STRUCT_FIELD (long,4,HESF_,VPRI) /* (XEA1 only) */ +#endif +#if XCHAL_HAVE_MAC16 +STRUCT_FIELD (long,4,HESF_,ACCLO) +STRUCT_FIELD (long,4,HESF_,ACCHI) +/*STRUCT_AFIELD(long,4,HESF_,MR, 4)*/ +#endif +#if XCHAL_HAVE_LOOPS +STRUCT_FIELD (long,4,HESF_,LCOUNT) +STRUCT_FIELD (long,4,HESF_,LBEG) +STRUCT_FIELD (long,4,HESF_,LEND) +#endif +#ifdef __XTENSA_WINDOWED_ABI__ +STRUCT_AFIELD(long,4,HESF_,AREG, 64) /* address registers ar0..ar63 */ +#else +STRUCT_AFIELD(long,4,HESF_,AREG, 16) /* address registers ar0..ar15 */ +#endif +#define HESF_AR(n) HESF_AREG+((n)*4) +STRUCT_END(HighPriFrame) +#define HESF_TOTALSIZE HighPriFrameSize+32 /* 32 bytes for interrupted code's save areas under SP */ + + +#if XCHAL_HAVE_XEA1 && HAVE_XSR /* could be made true for T1040 and T1050 */ +# error "high-priority interrupt stack frame needs adjustment if HAVE_XSR is allowed with XEA1" +#endif + + +#define PRI_N_STACK_SIZE 512 /* 512-byte stack for each level-N handling */ +#define PRI_N_STACK_SIZE2 256 /* 256-byte stack for each level-N handling */ + + + // Allocate save area and stack: + // (must use .bss, not .comm, because the subsequent .set does not work otherwise) + .section .bss, "aw" + .align 16 +LABEL(_Pri_,_Stack): .space PRI_N_STACK_SIZE + HESF_TOTALSIZE + PRI_N_STACK_SIZE2 + HESF_TOTALSIZE + + .balign 16 +LoadStoreErrorHandlerStack: + .word 0 # a0 + .word 0 # (unused) + .word 0 # a2 + .word 0 # a3 + .word 0 # a4 + .word 0 # a5 + .word 0 # a6 + +#if HAVE_XSR + .data + .global LABEL(_Pri_,_HandlerAddress) +LABEL(_Pri_,_HandlerAddress): .space 4 + .global LABEL(_Pri_, _NMICount) +LABEL(_Pri_,_NMICount): .space 4 +#endif + +/*************************** LoadStoreError Handler **************************/ + + .section .text + +/* Xtensa "Load/Store Exception" handler: + * Completes L8/L16 load instructions from Instruction address space, for which + * the architecture only supports 32-bit reads. 
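 * In C terms, the L8UI fast path behaves roughly like this sketch ('addr' being
 * the faulting EXCVADDR on this little-endian core):
 *
 *   uint32_t word = *(uint32_t *)(addr & ~3);          /* aligned 32-bit read */
 *   uint8_t  val  = (word >> (8 * (addr & 3))) & 0xFF; /* extract the byte */
 *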
+ *
+ * Called from UserExceptionVector if EXCCAUSE is LoadStoreErrorCause
+ *
+ * (Fast path (no branches) is for L8UI)
+ */
+ .literal_position
+
+ .balign 4
+LoadStoreErrorHandler:
+// .global LoadStoreErrorHandler
+ .type LoadStoreErrorHandler, @function
+
+ /* Registers are saved in the address corresponding to their register
+ * number times 4. This allows a quick and easy mapping later on when
+ * needing to store the value to a particular register number. */
+ mov a0, sp
+ movi sp, LoadStoreErrorHandlerStack
+ s32i a0, sp, 0
+ s32i a2, sp, 0x08
+ s32i a3, sp, 0x0c
+ s32i a4, sp, 0x10
+ rsr a0, sar # Save SAR in a0 to restore later
+
+ /* Examine the opcode which generated the exception */
+ /* Note: Instructions are in this order to avoid pipeline stalls. */
+ rsr a2, epc1
+ movi a3, ~3
+ ssa8l a2 # sar is now correct shift for aligned read
+ and a2, a2, a3 # a2 now 4-byte aligned address of instruction
+ l32i a4, a2, 0
+ l32i a2, a2, 4
+ movi a3, 0x00700F # opcode mask for l8ui/l16si/l16ui/s8i/s16i
+ src a2, a2, a4 # a2 now instruction that failed
+ and a3, a2, a3 # a3 is masked instruction
+
+ # Check first whether this is a store instruction
+ movi a4, 0x004002
+ beq a3, a4, .LSE_check_s8i_store # s8i
+
+ movi a4, 0x005002
+ beq a3, a4, .LSE_check_s16i_store # s16i
+
+ bnei a3, 0x000002, .LSE_check_l16
+
+ /* Note: At this point, opcode could technically be one of two things:
+ * xx0xx2 (L8UI)
+ * xx8xx2 (Reserved (invalid) opcode)
+ * It is assumed that we'll never get to this point from an illegal
+ * opcode, so we don't bother to check for that case and presume this
+ * is always an L8UI. */
+
+ movi a4, ~3
+ rsr a3, excvaddr # read faulting address
+ and a4, a3, a4 # a4 now word aligned read address
+
+ l32i a4, a4, 0 # perform the actual read
+ ssa8l a3 # sar is now shift to extract a3's byte
+ srl a3, a4 # shift right correct distance
+ extui a4, a3, 0, 8 # mask off bits we need for an l8
+
+.LSE_post_fetch:
+ /* We jump back here after either the L8UI or the L16*I routines do the
+ * necessary work to read the value from memory.
+ * At this point, a2 holds the faulting instruction and a4 holds the
+ * correctly read value.
+
+ * Restore original SAR value (saved in a0) and update EPC so we'll
+ * return back to the instruction following the one we just emulated */
+
+ /* Note: Instructions are in this order to avoid pipeline stalls */
+ rsr a3, epc1
+ wsr a0, sar
+ addi a3, a3, 0x3
+ wsr a3, epc1
+
+ /* Stupid opcode tricks: The jumptable we use later on needs 16 bytes
+ * per entry (so we can avoid a second jump by just doing a RFE inside
+ * each entry). Unfortunately, however, Xtensa doesn't have an addx16
+ * operation to make that easy for us. Luckily, all of the faulting
+ * opcodes we're processing are guaranteed to have bit 3 be zero, which
+ * means if we just shift the register bits of the opcode down by 3
+ * instead of 4, we will get the register number multiplied by 2. This
+ * combined with an addx8 will give us an effective addx16 without
+ * needing any extra shift operations. */
+ extui a2, a2, 3, 5 # a2 is now destination register 0-15 times 2
+
+ bgei a2, 10, .LSE_assign_reg # a5..a15 use jumptable
+ beqi a2, 2, .LSE_assign_a1 # a1 uses a special routine
+
+ /* We're storing into a0 or a2..a4, which are all saved in our "stack"
+ * area. Calculate the correct address and stick the value in there,
+ * then just do our normal restore and RFE (no jumps required, which
+ * actually makes a0..a4 substantially faster).
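+ *
+ * In other words (illustrative sketch; 'reg' is the destination register
+ * number, which a2 currently holds times 2, so the ADDX2 below computes
+ * sp + reg * 4, the register's save slot):
+ *
+ *   *(uint32_t *)((uint8_t *)sp + reg * 4) = value;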
+ */
+ addx2 a2, a2, sp
+ s32i a4, a2, 0
+
+ /* Restore all regs and return */
+ l32i a0, sp, 0
+ l32i a2, sp, 0x08
+ l32i a3, sp, 0x0c
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1 # restore a0 saved by UserExceptionVector
+ rfe
+
+.LSE_assign_reg:
+ /* At this point, a2 contains the register number times 2, a4 is the
+ * read value. */
+
+ /* Calculate the jumptable address, and restore all regs except a2 and
+ * a4 so we have less to do after jumping. */
+ /* Note: Instructions are in this order to avoid pipeline stalls. */
+ movi a3, .LSE_jumptable_base
+ l32i a0, sp, 0
+ addx8 a2, a2, a3 # a2 is now the address to jump to
+ l32i a3, sp, 0x0c
+
+ jx a2
+
+ .balign 4
+.LSE_check_l16:
+ /* At this point, a2 contains the opcode, a3 is masked opcode */
+ movi a4, 0x001002 # l16si or l16ui opcode after masking
+ bne a3, a4, .LSE_wrong_opcode
+
+ /* Note: At this point, the opcode could be one of two things:
+ * xx1xx2 (L16UI)
+ * xx9xx2 (L16SI)
+ * Both of these we can handle. */
+
+ movi a4, ~3
+ rsr a3, excvaddr # read faulting address
+ and a4, a3, a4 # a4 now word aligned read address
+
+ l32i a4, a4, 0 # perform the actual read
+ ssa8l a3 # sar is now shift to extract a3's bytes
+ srl a3, a4 # shift right correct distance
+ extui a4, a3, 0, 16 # mask off bits we need for an l16
+
+ bbci a2, 15, .LSE_post_fetch # Not a signed op
+ bbci a4, 15, .LSE_post_fetch # Value does not need sign-extension
+
+ movi a3, 0xFFFF0000
+ or a4, a3, a4 # set 32-bit sign bits
+ j .LSE_post_fetch
+
+ .balign 4
+.LSE_check_s8i_store:
+ s32i a5, sp, 0x14
+ s32i a6, sp, 0x18
+ movi a5, 0xff # mask for the byte to store
+ j .LSE_check_store
+
+.LSE_check_s16i_store:
+ s32i a5, sp, 0x14
+ s32i a6, sp, 0x18
+ movi a5, 0xffff # mask for the halfword to store
+ j .LSE_check_store
+
+.LSE_check_store:
+ movi a4, ~3
+ rsr a3, excvaddr # write faulting address
+ and a4, a3, a4 # a4 now word aligned write address
+ ssa8b a3 # sar = left-shift amount for the byte offset
+ l32i a3, a4, 0 # read the enclosing word (read-modify-write)
+
+ mov a4, a5
+ sll a4, a4 # a4 = mask shifted to the store offset
+ movi a6, -1
+ xor a4, a6, a4 # a4 = ~a4
+ and a3, a3, a4 # clear the target field in the word
+
+ movi a4, ~3
+ rsr a6, excvaddr # write faulting address
+ and a4, a6, a4 # a4 now word aligned write address
+
+ extui a2, a2, 4, 4 # a2 is now the source register number 0-15
+
+ bgei a2, 7, .LSE_big_reg
+ movi a6, 4
+ mull a6, a2, a6 # a6 = reg * 4: offset of the register's save slot
+ add a2, a6, sp
+ l32i a2, a2, 0
+ j .Write_data
+
+.LSE_big_reg:
+ movi a6, 7
+ sub a2, a2, a6
+ movi a6, 8
+ mull a2, a2, a6 # a2 = (reg - 7) * 8: offset into the 8-byte-entry table
+
+ movi a6, .LSE_big_reg_table
+ add a2, a2, a6
+ jx a2
+
+.balign 4
+.LSE_big_reg_table:
+ .org .LSE_big_reg_table + (0*(2*4))
+ mov a2, a7
+ j .Write_data
+
+ .org .LSE_big_reg_table + (1*(2*4))
+ mov a2, a8
+ j .Write_data
+
+ .org .LSE_big_reg_table + (2*(2*4))
+ mov a2, a9
+ j .Write_data
+
+ .org .LSE_big_reg_table + (3*(2*4))
+ mov a2, a10
+ j .Write_data
+
+ .org .LSE_big_reg_table + (4*(2*4))
+ mov a2, a11
+ j .Write_data
+
+ .org .LSE_big_reg_table + (5*(2*4))
+ mov a2, a12
+ j .Write_data
+
+ .org .LSE_big_reg_table + (6*(2*4))
+ mov a2, a13
+ j .Write_data
+
+ .org .LSE_big_reg_table + (7*(2*4))
+ mov a2, a14
+ j .Write_data
+
+ .org .LSE_big_reg_table + (8*(2*4))
+ mov a2, a15
+ j .Write_data
+
+.Write_data:
+ and a2, a2, a5 # keep only the byte/halfword to store
+ sll a2, a2 # shift it to the store offset
+ or a3, a3, a2 # merge it into the word
+
+ s32i a3, a4, 0
+
+ rsr a3, epc1
+ wsr a0, sar
+ addi a3, a3, 0x3
+ wsr a3, epc1
+
+ /* Restore all regs and return */
+ l32i a0, sp, 0
+ l32i a2, sp, 0x08
+ l32i a3, sp, 0x0c
+ l32i a4, sp, 0x10
+ l32i a5, sp, 0x14
+ l32i a6, sp, 0x18
+ mov a1, a0
+ rsr a0, excsave1 # restore a0 saved by UserExceptionVector
+ rfe
+
+.LSE_wrong_opcode:
+ /* If we got here it's not an opcode we can try to fix, so bomb out.
+ * Restore registers so any dump the fatal exception routine produces
+ * will have correct values */
+ wsr a0, sar
+ l32i a0, sp, 0
+ l32i a2, sp, 0x08
+ l32i a3, sp, 0x0c
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ call0 user_fatal_exception_handler
+
+ .balign 4
+.LSE_assign_a1:
+ /* The destination is a1: park the value in the spare stack slot, then
+ * reload a1 from it after the other registers are restored. */
+ s32i a4, sp, 0x04
+ /* Then restore all regs and return */
+ l32i a0, sp, 0
+ l32i a2, sp, 0x08
+ l32i a3, sp, 0x0c
+ l32i a4, sp, 0x10
+ l32i a1, sp, 0x04
+ rsr a0, excsave1
+ rfe
+
+ .balign 4
+.LSE_jumptable:
+ /* The first 5 entries (80 bytes) of this table are unused (registers
+ * a0..a4 are handled separately above). Rather than have a whole bunch
+ * of wasted space, we just pretend that the table starts 80 bytes
+ * earlier in memory. */
+ .set .LSE_jumptable_base, .LSE_jumptable - (16 * 5)
+
+ .org .LSE_jumptable_base + (16 * 5)
+ mov a5, a4
+ l32i a2, sp, 0x08
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ rfe
+
+ .org .LSE_jumptable_base + (16 * 6)
+ mov a6, a4
+ l32i a2, sp, 0x08
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ rfe
+
+ .org .LSE_jumptable_base + (16 * 7)
+ mov a7, a4
+ l32i a2, sp, 0x08
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ rfe
+
+ .org .LSE_jumptable_base + (16 * 8)
+ mov a8, a4
+ l32i a2, sp, 0x08
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ rfe
+
+ .org .LSE_jumptable_base + (16 * 9)
+ mov a9, a4
+ l32i a2, sp, 0x08
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ rfe
+
+ .org .LSE_jumptable_base + (16 * 10)
+ mov a10, a4
+ l32i a2, sp, 0x08
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ rfe
+
+ .org .LSE_jumptable_base + (16 * 11)
+ mov a11, a4
+ l32i a2, sp, 0x08
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ rfe
+
+ .org .LSE_jumptable_base + (16 * 12)
+ mov a12, a4
+ l32i a2, sp, 0x08
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ rfe
+
+ .org .LSE_jumptable_base + (16 * 13)
+ mov a13, a4
+ l32i a2, sp, 0x08
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ rfe
+
+ .org .LSE_jumptable_base + (16 * 14)
+ mov a14, a4
+ l32i a2, sp, 0x08
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ rfe
+
+ .org .LSE_jumptable_base + (16 * 15)
+ mov a15, a4
+ l32i a2, sp, 0x08
+ l32i a4, sp, 0x10
+ mov a1, a0
+ rsr a0, excsave1
+ rfe
+
+ .section .UserEnter.text, "ax"
+ .global call_user_start
+ .type call_user_start,@function
+ .align 4
+ .literal_position
+call_user_start:
+
+ movi a2, 0x40100000 /* note: absolute symbol, not a ptr */
+ wsr a2, vecbase
+ call0 user_start /* jump to the user code entry point */
+
+#if 0
+/*
+Panic handler.
+Should be reached by call0 (preferable) or jump only. If call0, a0 says where
+from. If on simulator, display panic message and abort, else loop indefinitely.
+*/
+// .section .iram.text
+ .section .text
+ .global _xt_panic
+ .type _xt_panic,@function
+ .align 4
+_xt_panic:
+ #ifdef XT_SIMULATOR
+ addi a4, a0, -3 /* point to call0 */
+ movi a3, _xt_panic_message
+ movi a2, SYS_log_msg
+ simcall
+ movi a2, SYS_gdb_abort
+ simcall
+ #else
+ rsil a2, XCHAL_EXCM_LEVEL /* disable all low & med ints */
+1: j 1b /* loop infinitely */
+ #endif
+
+ .section .rodata, "a"
+ .align 4
+_xt_panic_message:
+ .string "\n*** _xt_panic() was called from 0x%08x or jumped to. ***\n"
+#endif
+
+#ifdef XT_INTEXC_HOOKS
+ /*
+ Hooks to dynamically install handlers for exceptions and interrupts.
+ Allows automated regression frameworks to install handlers per test.
+ Consists of an array of function pointers indexed by interrupt level, + with index 0 containing the entry for user exceptions. + Initialized with all 0s, meaning no handler is installed at each level. + See comment in xtensa_rtos.h for more details. + */ + .data + .global _xt_intexc_hooks + .type _xt_intexc_hooks,@object + .align 4 +_xt_intexc_hooks: + .fill XT_INTEXC_HOOK_NUM, 4, 0 +#endif + + +/******************************************************************************* + +EXCEPTION AND LEVEL 1 INTERRUPT VECTORS AND LOW LEVEL HANDLERS +(except window exception vectors). + +Each vector goes at a predetermined location according to the Xtensa +hardware configuration, which is ensured by its placement in a special +section known to the Xtensa linker support package (LSP). It performs +the minimum necessary before jumping to the handler in the .text section. + +The corresponding handler goes in the normal .text section. It sets up +the appropriate stack frame, saves a few vector-specific registers and +calls XT_RTOS_INT_ENTER to save the rest of the interrupted context +and enter the RTOS, then sets up a C environment. It then calls the +user's interrupt handler code (which may be coded in C) and finally +calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling. + +While XT_RTOS_INT_EXIT does not return directly to the interruptee, +eventually the RTOS scheduler will want to dispatch the interrupted +task or handler. The scheduler will return to the exit point that was +saved in the interrupt stack frame at XT_STK_EXIT. + +*******************************************************************************/ + +/* +-------------------------------------------------------------------------------- +Debug Exception. +-------------------------------------------------------------------------------- +*/ + +#if XCHAL_HAVE_DEBUG + + .begin literal_prefix .DebugExceptionVector + .section .DebugExceptionVector.text, "ax" + .global _DebugExceptionVector + .align 4 + .literal_position +_DebugExceptionVector: + + #ifdef XT_SIMULATOR + /* + In the simulator, let the debugger (if any) handle the debug exception, + or simply stop the simulation: + */ + wsr a2, EXCSAVE+XCHAL_DEBUGLEVEL /* save a2 where sim expects it */ + movi a2, SYS_gdb_enter_sktloop + simcall /* have ISS handle debug exc. */ + #elif 0 /* change condition to 1 to use the HAL minimal debug handler */ + wsr a3, EXCSAVE+XCHAL_DEBUGLEVEL + movi a3, xthal_debugexc_defhndlr_nw /* use default debug handler */ + jx a3 + #else + wsr a0, EXCSAVE+XCHAL_DEBUGLEVEL /* save original a0 somewhere */ + call0 user_fatal_exception_handler /* does not return */ + rfi XCHAL_DEBUGLEVEL /* make a0 point here not later */ + #endif + + .end literal_prefix + +#endif + +/* +-------------------------------------------------------------------------------- +Double Exception. +Double exceptions are not a normal occurrence. They indicate a bug of some kind. 
+--------------------------------------------------------------------------------
+*/
+
+#ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR
+
+ .begin literal_prefix .DoubleExceptionVector
+ .section .DoubleExceptionVector.text, "ax"
+ .global _DoubleExceptionVector
+ .align 4
+ .literal_position
+_DoubleExceptionVector:
+
+ #if XCHAL_HAVE_DEBUG
+ break 1, 4 /* unhandled double exception */
+ #endif
+ call0 user_fatal_exception_handler /* does not return */
+ rfde /* make a0 point here not later */
+
+ .end literal_prefix
+
+#endif /* XCHAL_DOUBLEEXC_VECTOR_VADDR */
+
+/*
+--------------------------------------------------------------------------------
+Kernel Exception (including Level 1 Interrupt from kernel mode).
+--------------------------------------------------------------------------------
+*/
+
+ .begin literal_prefix .KernelExceptionVector
+ .section .KernelExceptionVector.text, "ax"
+ .global _KernelExceptionVector
+ .align 4
+ .literal_position
+_KernelExceptionVector:
+
+ wsr a0, EXCSAVE_1 /* preserve a0 */
+ call0 _xt_kernel_exc /* kernel exception handler */
+ /* never returns here - call0 is used as a jump (see note at top) */
+
+ .end literal_prefix
+
+// .section .iram.text
+ .section .text
+ .align 4
+ .literal_position
+_xt_kernel_exc:
+ #if XCHAL_HAVE_DEBUG
+ break 1, 0 /* unhandled kernel exception */
+ #endif
+ call0 user_fatal_exception_handler /* does not return */
+ rfe /* make a0 point here not later */
+
+
+/*
+--------------------------------------------------------------------------------
+User Exception (including Level 1 Interrupt from user mode).
+--------------------------------------------------------------------------------
+*/
+
+ .begin literal_prefix .UserExceptionVector
+ .section .UserExceptionVector.text, "ax"
+ .global _UserExceptionVector
+ .type _UserExceptionVector,@function
+ .align 4
+ .literal_position
+_UserExceptionVector:
+
+ wsr a0, EXCSAVE_1 /* preserve a0 */
+ wsr a1, EXCSAVE_2
+ call0 _xt_user_exc /* user exception handler */
+ /* never returns here - call0 is used as a jump (see note at top) */
+
+ .end literal_prefix
+
+// .section .iram.text
+ .section .text
+ /*
+ Insert some waypoints for jumping beyond the signed 8-bit range
+ of conditional branch instructions, so the conditional branches
+ to specific-cause exception handlers are not taken in the mainline.
+ Saves some cycles in the mainline.
+ */
+
+ #if XCHAL_HAVE_WINDOWED
+ .align 4
+_xt_to_alloca_exc:
+ call0 _xt_alloca_exc /* in window vectors section */
+ /* never returns here - call0 is used as a jump (see note at top) */
+ #endif
+
+ .align 4
+ .literal_position
+_xt_to_syscall_exc:
+ call0 _xt_syscall_exc
+ /* never returns here - call0 is used as a jump (see note at top) */
+
+ /* User exception handler begins here. */
+ .type _xt_user_exc,@function
+ .align 4
+_xt_user_exc:
+ /*
+ Handle alloca and syscall exceptions before allocating stack frame and
+ interacting with RTOS.
+ */
+ rsr a0, EXCCAUSE
+ #if XCHAL_HAVE_WINDOWED
+ beqi a0, EXCCAUSE_ALLOCA, _xt_to_alloca_exc
+ #endif
+ beqi a0, EXCCAUSE_SYSCALL, _xt_to_syscall_exc
+ beqi a0, EXCCAUSE_LOAD_STORE_ERROR, LoadStoreErrorHandler
+
+ /* Allocate interrupt stack frame and save minimal context.
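+ In rough C terms, the next few instructions do the following
+ (illustrative sketch; XT_STK_FRMSZ and the XT_STK_* offsets come from
+ the port headers included at the top of this file):
+
+     frame       = sp - XT_STK_FRMSZ;
+     frame->a1   = sp;          // pre-exception stack pointer
+     frame->ps   = PS;
+     frame->pc   = EPC_1;
+     frame->a0   = EXCSAVE_1;   // interruptee's a0, saved by the vector
+     frame->exit = _xt_user_exit;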
+ */
+ mov a0, sp /* sp == a1 */
+ addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
+ s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
+ rsr a0, PS /* save interruptee's PS */
+ s32i a0, sp, XT_STK_PS
+ rsr a0, EPC_1 /* save interruptee's PC */
+ s32i a0, sp, XT_STK_PC
+ rsr a0, EXCSAVE_1 /* save interruptee's a0 */
+ s32i a0, sp, XT_STK_A0
+ movi a0, _xt_user_exit /* save exit point for dispatch */
+ s32i a0, sp, XT_STK_EXIT
+
+ /*
+ Handle co-processor exceptions after allocating stack frame and before
+ interacting with RTOS.
+ */
+ #if XCHAL_CP_NUM > 0
+ rsr a0, EXCCAUSE
+ bgeui a0, EXCCAUSE_CP0_DISABLED, _xt_to_coproc_exc
+.L_xt_user_exc_not_coproc:
+ #endif
+
+ /* Save rest of interrupt context and enter RTOS. */
+ call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
+
+ /* !! We are now on the RTOS system stack !! */
+
+ /* Set up PS for C, reenable hi-pri interrupts, and clear EXCM. */
+ #ifdef __XTENSA_CALL0_ABI__
+ movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM
+ #else
+ movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE
+ #endif
+ wsr a0, PS
+ rsync
+
+ /* !! It is OK to call C handlers after this point. !! */
+
+ /* Handle exceptions per EXCCAUSE. */
+ rsr a2, EXCCAUSE /* a2 = exception cause */
+ beqi a2, EXCCAUSE_LEVEL1INTERRUPT, .L_xt_user_int /* level 1 int */
+
+ #ifdef XT_INTEXC_HOOKS
+ /*
+ Call exception hook to pre-handle exceptions (if installed).
+ Pass EXCCAUSE in a2, and check result in a2 (if -1, skip default handling).
+ */
+ movi a0, _xt_intexc_hooks
+ l32i a0, a0, 0 /* user exception hook index 0 */
+ beqz a0, 1f
+.Ln_xt_user_exc_call_hook:
+ #ifdef __XTENSA_CALL0_ABI__
+ callx0 a0
+ beqi a2, -1, .L_xt_user_done
+ #else
+ mov a6, a2
+ callx4 a0
+ beqi a6, -1, .L_xt_user_done
+ mov a2, a6
+ #endif
+1:
+ #endif
+
+ /* USER_EDIT:
+ ADD ANY CUSTOM EXCEPTION HANDLER CODE HERE, OR CALL C HANDLER.
+ The exception cause is in A2. After handling, jump to .L_xt_user_int .
+ Note on Call0 ABI: Callee-saved regs (a12-15) have not yet been saved,
+ so should not be corrupted here. A C handler will not corrupt them.
+ */
+
+ /* If we get here, the exception has not been handled. */
+.Ln_xt_user_unhandled:
+ #if XCHAL_HAVE_DEBUG
+ break 1, 1 /* unhandled user exception */
+ #endif
+ call0 user_fatal_exception_handler
+
+ /* Handle level 1 interrupts. OK to enable med-pri interrupts now. */
+.L_xt_user_int:
+ rsil a0, 1 /* reenable ints above level 1 */
+
+ /*
+ Get mask of pending, enabled interrupts at this level into a2.
+ Comment this out if there is only one interrupt at this level.
+ */
+ rsr a2, INTENABLE
+ rsr a3, INTERRUPT
+ movi a4, XCHAL_INTLEVEL1_MASK
+ and a2, a2, a3
+ and a2, a2, a4
+
+ #ifdef XT_INTEXC_HOOKS
+ /* Call interrupt hook if present to (pre)handle interrupts. */
+ movi a0, _xt_intexc_hooks
+ l32i a0, a0, 1<<2
+ beqz a0, 2f
+.Ln_xt_user_int_call_hook:
+ #ifdef __XTENSA_CALL0_ABI__
+ callx0 a0
+ beqz a2, .L_xt_user_done
+ #else
+ mov a6, a2
+ callx4 a0
+ beqz a6, .L_xt_user_done
+ mov a2, a6
+ #endif
+2:
+ #endif
+
+ /* USER_EDIT:
+ ADD LOW PRIORITY LEVEL 1 INTERRUPT HANDLER CODE HERE, OR CALL C HANDLER.
+ At this point, a2 contains a mask of pending, enabled ints at this level.
+ Note on Call0 ABI: Callee-saved regs (a12-15) have not yet been saved,
+ so should not be corrupted here. A C handler will not corrupt them.
+ HANDLER MUST CAUSE LEVEL TRIGGERED INTERRUPT REQUESTS TO BE DEASSERTED.
+ When done, ensure a2 contains a mask of unhandled (still pending)
+ enabled ints at this level, and fall through.
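+
+ A Call0-ABI C handler wired in here would, in spirit, look like this
+ (illustrative sketch; my_l1_handler and SERVICED_MASK are hypothetical
+ names, and a2 carries the mask in and out):
+
+     uint32_t my_l1_handler(uint32_t pending)
+     {
+         // service and deassert the sources we handle
+         return pending & ~SERVICED_MASK;  // still-pending mask
+     }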
+ */ + + #if XT_TIMER_INTPRI == 1 +.Ln_xt_user_int_timer: + movi a3, 0xFFBF + and a3, a2, a3 + bnez a3, 3f + /* Interrupt handler for the RTOS tick timer if at this level. */ + movi a3, XT_TIMER_INTEN /* timer interrupt bit */ + /*bnone a2, a3, 3f*/ + #ifdef __XTENSA_CALL0_ABI__ + sub a12, a2, a3 /* clear timer int and save mask */ + call0 XT_RTOS_TIMER_INT + mov a2, a12 /* recover mask of remaining ints */ + beqz a2, 4f + #else + call4 XT_RTOS_TIMER_INT /* a2 automatically preserved */ + sub a2, a2, a3 /* clear timer int from mask */ + #endif +3: + call0 _xt_isr_handler + bnez a2, .Ln_xt_user_int_timer + #endif +4: + /* All interrupts at this level should have been handled now. */ + beqz a2, .L_xt_user_done + + /* If we get here, we have an unhandled interrupt. */ + #if XCHAL_HAVE_DEBUG + break 1, 1 /* unhandled user exception */ + /* EXCCAUSE == 4 (level 1 int) */ + #endif + call0 user_fatal_exception_handler + + /* Done handling after XT_RTOS_INT_ENTER. Give control to RTOS. */ +.L_xt_user_done: + call0 XT_RTOS_INT_EXIT /* does not return directly here */ + + /* + Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT + on entry and used to return to a thread or interrupted interrupt handler. + */ + .global _xt_user_exit + .type _xt_user_exit,@function + .align 4 +_xt_user_exit: + l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */ + wsr a0, PS + l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */ + wsr a0, EPC_1 + l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */ + l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */ + rsync /* ensure PS and EPC written */ + rfe /* PS.EXCM is cleared */ + +/* +-------------------------------------------------------------------------------- +Syscall Exception Handler (jumped to from User Exception Handler). +Syscall 0 is required to spill the register windows (no-op in Call 0 ABI). +Only syscall 0 is handled here. Other syscalls return -1 to caller in a2. +-------------------------------------------------------------------------------- +*/ + +// .section .iram.text + .section .text + .type _xt_syscall_exc,@function + .align 4 +_xt_syscall_exc: + + #ifdef __XTENSA_CALL0_ABI__ + /* + Save minimal regs for scratch. Syscall 0 does nothing in Call0 ABI. + Use a minimal stack frame (16B) to save A2 & A3 for scratch. + PS.EXCM could be cleared here, but unlikely to improve worst-case latency. + rsr a0, PS + addi a0, a0, -PS_EXCM_MASK + wsr a0, PS + */ + addi sp, sp, -16 + s32i a2, sp, 8 + s32i a3, sp, 12 + #else /* Windowed ABI */ + /* + Save necessary context and spill the register windows. + PS.EXCM is still set and must remain set until after the spill. + Reuse context save function though it saves more than necessary. + For this reason, a full interrupt stack frame is allocated. + */ + addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */ + s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */ + s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */ + call0 _xt_context_save + #endif + + /* + Grab the interruptee's PC and skip over the 'syscall' instruction. + If it's at the end of a zero-overhead loop and it's not on the last + iteration, decrement loop counter and skip to beginning of loop. 
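+
+ Equivalently, in C (an illustrative restatement of the margin comments
+ in the code below):
+
+     pc = EPC_1 + 3;                  // skip the 3-byte SYSCALL
+     if (pc == LEND && LCOUNT != 0) {
+         LCOUNT--;
+         pc = LBEG;                   // wrap back to the loop start
+     }
+     EPC_1 = pc;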
+ */
+ rsr a2, EPC_1 /* a2 = PC of 'syscall' */
+ addi a3, a2, 3 /* ++PC */
+ #if XCHAL_HAVE_LOOPS
+ rsr a0, LEND /* if (PC == LEND */
+ bne a3, a0, 1f
+ rsr a0, LCOUNT /* && LCOUNT != 0) */
+ beqz a0, 1f /* { */
+ addi a0, a0, -1 /* --LCOUNT */
+ rsr a3, LBEG /* PC = LBEG */
+ wsr a0, LCOUNT /* } */
+ #endif
+1: wsr a3, EPC_1 /* update PC */
+
+ /* Restore interruptee's context and return from exception. */
+ #ifdef __XTENSA_CALL0_ABI__
+ l32i a2, sp, 8
+ l32i a3, sp, 12
+ addi sp, sp, 16
+ #else
+ call0 _xt_context_restore
+ addi sp, sp, XT_STK_FRMSZ
+ #endif
+ movi a0, -1
+ movnez a2, a0, a2 /* return -1 if not syscall 0 */
+ rsr a0, EXCSAVE_1
+ rfe
+
+
+/*
+Currently only shells for high priority interrupt handlers are provided
+here. However, a template and example can be found in the Tensilica tools
+documentation: "Microprocessor Programmer's Guide".
+*/
+
+#if XCHAL_HAVE_NMI
+
+ .begin literal_prefix .NMIExceptionVector
+ .section .NMIExceptionVector.text, "ax"
+ .global _NMIExceptionVector
+ .type _NMIExceptionVector,@function
+ .align 4
+ .literal_position
+_NMIExceptionVector:
+ wsr a0, EXCSAVE + XCHAL_NMILEVEL /* preserve a0 */
+ wsr a1, EXCSAVE_2
+ call0 _xt_nmi /* load interrupt handler */
+ /* never returns here - call0 is used as a jump (see note at top) */
+
+ .end literal_prefix
+
+// .section .iram.text
+ .section .text
+ .type _xt_nmi,@function
+ .align 4
+_xt_nmi:
+
+ #ifdef XT_INTEXC_HOOKS
+ #error
+ /* Call interrupt hook if present to (pre)handle interrupts. */
+ movi a0, _xt_intexc_hooks
+ l32i a0, a0, XCHAL_NMILEVEL<<2
+ beqz a0, 1f
+.Ln_xt_nmi_call_hook:
+ callx0 a0 /* must NOT disturb stack! */
+1:
+ #endif
+
+ /* USER_EDIT:
+ ADD HIGH PRIORITY NON-MASKABLE INTERRUPT (NMI) HANDLER CODE HERE.
+ */
+
+ movi a0, LABEL(_Pri_,_NMICount)
+ l32i a0, a0, 0
+
+ bnez a0, nmi_reentry
+ movi a0, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE // get ptr to save area
+ j nmi_common
+nmi_reentry:
+ movi a0, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE + HESF_TOTALSIZE + PRI_N_STACK_SIZE2 // get ptr to save area
+nmi_common:
+
+ // interlock
+
+ // Save a few registers so we can do some work:
+ s32i a2, a0, HESF_AR(2)
+#if HAVE_XSR
+ //movi a0, LABEL(_Level,FromVector) // this dispatcher's address
+ movi a2, LABEL(_Pri_,_HandlerAddress) // dispatcher address var.
+ s32i a1, a0, HESF_AR(1)
+ l32i a2, a2, 0 // get dispatcher address
+ s32i a3, a0, HESF_AR(3)
+ xsr a2, EXCSAVE_LEVEL // get saved a0, restore dispatcher address
+#else
+ #error
+ rsr a2, EXCSAVE_LEVEL // get saved a0
+ s32i a1, a0, HESF_AR(1)
+ s32i a3, a0, HESF_AR(3)
+#endif
+ s32i a4, a0, HESF_AR(4)
+ s32i a2, a0, HESF_AR(0)
+
+ // Save/restore all exception state
+ // (IMPORTANT: this code assumes no general exceptions occur
+ // during the execution of this dispatcher until this state
+ // is completely saved and from the point it is restored.)
+ //
+ // Exceptions that may normally occur within the C handler
+ // include window exceptions (affecting EPC1), alloca exceptions
+ // (affecting EPC1/EXCCAUSE and its handling uses EXCSAVE1),
+ // and possibly others depending on the particular C handler
+ // (possibly needing save/restore of EXCVADDR; and EXCVADDR
+ // is also possibly corrupted by any access thru an auto-refill
+ // way on a processor with a full MMU).
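+ //
+ // In outline (illustrative): EPC1/EXCCAUSE are saved first (plus
+ // EXCVADDR/EXCSAVE1 on XEA2). EPC3/EPS3 -- the NMI return state --
+ // are saved as well, unless EPC3 shows this NMI re-entered exactly
+ // at the dispatcher's own 'rfi' (label nmi_rfi below), the case the
+ // code tags 'nmi_reentried'.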
+ // + rsr a3, EPC1 + rsr a4, EXCCAUSE + s32i a3, a0, HESF_EPC1 + s32i a4, a0, HESF_EXCCAUSE +#if !XCHAL_HAVE_XEA1 + rsr a3, EXCVADDR + s32i a3, a0, HESF_EXCVADDR +#endif + rsr a4, EXCSAVE1 + s32i a4, a0, HESF_EXCSAVE1 + rsr a3, EPC3 + movi a4, nmi_rfi + beq a3, a4, nmi_reentried + s32i a3, a0, HESF_EPC3 + rsr a4, EPS3 + s32i a4, a0, HESF_EPS3 + +nmi_reentried: + +#ifdef __XTENSA_WINDOWED_ABI__ + // Save remainder of entire address register file (!): + movi a2, XCHAL_NUM_AREGS - 8 // how many saved so far +#endif + + s32i a5, a0, HESF_AR(5) + s32i a6, a0, HESF_AR(6) + s32i a7, a0, HESF_AR(7) + +1: s32i a8, a0, HESF_AR(8) + s32i a9, a0, HESF_AR(9) + s32i a10, a0, HESF_AR(10) + s32i a11, a0, HESF_AR(11) + s32i a12, a0, HESF_AR(12) + s32i a13, a0, HESF_AR(13) + s32i a14, a0, HESF_AR(14) + s32i a15, a0, HESF_AR(15) + +#ifdef __XTENSA_WINDOWED_ABI__ + addi a8, a2, -8 + addi a10, a0, 8*4 + rotw 2 + bnez a2, 1b // loop until done + + rotw 2 + // back to original a2 ... + + // Save a few other registers required for C: + rsr a3, WINDOWSTART + rsr a4, WINDOWBASE + s32i a3, a0, HESF_WINDOWSTART + s32i a4, a0, HESF_WINDOWBASE + + // Setup window registers for first caller: + movi a3, 1 + movi a4, 0 + wsr a3, WINDOWSTART + wsr a4, WINDOWBASE + rsync + + // Note: register window has rotated, ie. a0..a15 clobbered. + +#endif /* __XTENSA_WINDOWED_ABI__ */ + + //movi a1, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE // get ptr to save area (is also initial stack ptr) + mov a1, a0 + movi a0, 0 // mark start of call frames in stack + + // Critical state saved, a bit more to do to allow window exceptions... + + // We now have a C-coherent stack and window state. + // Still have to fix PS while making sure interrupts stay disabled + // at the appropriate level (ie. level 2 and below are disabled in this case). + +#if XCHAL_HAVE_XEA1 + #error + movi a7, _xtos_intstruct // address of interrupt management globals + rsilft a3, _INTERRUPT_LEVEL, XTOS_LOCKLEVEL // lockout + movi a4, ~INTLEVEL_N_BELOW_MASK // mask out all interrupts at this level or lower + l32i a3, a7, XTOS_VPRI_ENABLED_OFS // read previous _xtos_vpri_enabled + l32i a5, a7, XTOS_ENABLED_OFS // read _xtos_enabled + s32i a4, a7, XTOS_VPRI_ENABLED_OFS // set new _xtos_vpri_enabled (mask interrupts as if at _INTERRUPT_LEVEL) + s32i a3, a1, HESF_VPRI // save previous vpri + movi a2, 0x50020 // WOE=1, UM=1, INTLEVEL=0 + and a3, a5, a4 // mask out selected interrupts + wsr a3, INTENABLE // disable all low-priority interrupts +#else + // Load PS for C code, clear EXCM (NOTE: this step is different for XEA1): +# ifdef __XTENSA_CALL0_ABI__ + movi a2, 0x00020 + _INTERRUPT_LEVEL // WOE=0, CALLINC=0, UM=1, INTLEVEL=N, EXCM=0, RING=0 +# else + movi a2, 0x50020 + _INTERRUPT_LEVEL // WOE=1, CALLINC=1, UM=1, INTLEVEL=N, EXCM=0, RING=0 +# endif + +#endif + wsr a2, PS // update PS to enable window exceptions, etc as per above + rsync + + // Okay, window exceptions can now happen (although we have to call + // deep before any will happen because we've reset WINDOWSTART). 
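+
+ // In rough C terms, the rest of this dispatcher is (illustrative
+ // sketch; LABEL(_Pri_,_NMICount) is the nesting counter defined
+ // above, and wDev_ProcessFiq is the Call0-ABI C handler used below):
+ //
+ //   save SAR (and LOOPS/MAC16 state, if configured);
+ //   LABEL(_Pri_,_NMICount)++;
+ //   wDev_ProcessFiq();
+ //   LABEL(_Pri_,_NMICount)--;
+ //   restore state and return via 'rfi XCHAL_NMILEVEL';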
+
+ // Save other state that might get clobbered by C code:
+
+////////////////// COMMON DISPATCH CODE BEGIN
+
+ rsr a14, SAR
+ s32i a14, a1, HESF_SAR
+#if XCHAL_HAVE_LOOPS
+ #error
+ rsr a14, LCOUNT
+ s32i a14, a1, HESF_LCOUNT
+ rsr a14, LBEG
+ s32i a14, a1, HESF_LBEG
+ rsr a14, LEND
+ s32i a14, a1, HESF_LEND
+#endif
+#if XCHAL_HAVE_MAC16
+ #error
+ rsr a14, ACCLO
+ s32i a14, a1, HESF_ACCLO
+ rsr a14, ACCHI
+ s32i a14, a1, HESF_ACCHI
+#endif
+
+ // increment the NMI nesting count
+ movi a2, LABEL(_Pri_,_NMICount)
+ l32i a3, a2, 0
+ addi a3, a3, 1
+ s32i a3, a2, 0
+
+#ifdef __XTENSA_CALL0_ABI__
+// movi a13, pwm_tim1_intr_handler
+ movi a13, wDev_ProcessFiq
+ callx0 a13 // call interrupt's C handler
+#else
+ movi a13, NMI_Handler
+ callx4 a13 // call interrupt's C handler
+#endif
+
+ // decrement the NMI nesting count
+ movi a2, LABEL(_Pri_,_NMICount)
+ l32i a3, a2, 0
+ addi a3, a3, -1
+ s32i a3, a2, 0
+
+ beqi a3, 1, nmi_reentry2 // still nested: restore from the secondary save area
+ movi a1, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE // get ptr to save area
+ j nmi_common2
+nmi_reentry2:
+ movi a1, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE + HESF_TOTALSIZE + PRI_N_STACK_SIZE2 // get ptr to save area
+nmi_common2:
+
+
+ // Restore everything, and return.
+
+ // Three temp registers are required for this code to be optimal (no interlocks) in
+ // T2xxx microarchitectures with 7-stage pipe; otherwise only two
+ // registers would be needed.
+ //
+#if XCHAL_HAVE_LOOPS
+ l32i a13, a1, HESF_LCOUNT
+ l32i a14, a1, HESF_LBEG
+ l32i a15, a1, HESF_LEND
+ wsr a13, LCOUNT
+ wsr a14, LBEG
+ wsr a15, LEND
+#endif
+
+#if XCHAL_HAVE_MAC16
+ l32i a13, a1, HESF_ACCLO
+ l32i a14, a1, HESF_ACCHI
+ wsr a13, ACCLO
+ wsr a14, ACCHI
+#endif
+ l32i a15, a1, HESF_SAR
+ wsr a15, SAR
+
+////////////////// COMMON DISPATCH CODE END
+
+#if XCHAL_HAVE_XEA1
+ // Here, a7 = address of interrupt management globals
+ l32i a4, a1, HESF_VPRI // restore previous vpri
+ rsil a3, XTOS_LOCKLEVEL // lockout
+ l32i a5, a7, XTOS_ENABLED_OFS // read _xtos_enabled
+ s32i a4, a7, XTOS_VPRI_ENABLED_OFS // set new _xtos_vpri_enabled
+ movi a2, 0x00020 + _INTERRUPT_LEVEL // WOE=0, UM=1, INTLEVEL=N
+ /* movi a2, 0x00020 + 0 // WOE=0, UM=1, INTLEVEL=N */
+ and a3, a5, a4 // mask out selected interrupts
+ wsr a3, INTENABLE // disable all low-priority interrupts
+#else
+ // Load PS for interrupt exit, set EXCM:
+ movi a2, 0x00030 + _INTERRUPT_LEVEL // WOE=0, CALLINC=0, UM=1, INTLEVEL=N, EXCM=1, RING=0
+ /* movi a2, 0x00030 + 0 // WOE=0, CALLINC=0, UM=1, INTLEVEL=N, EXCM=1, RING=0 */
+#endif
+ wsr a2, PS // update PS to disable window exceptions, etc as per above
+ rsync
+
+ // NOTE: here for XEA1, restore INTENABLE etc...
+
+#ifdef __XTENSA_WINDOWED_ABI__
+ // Restore window registers:
+ l32i a2, a1, HESF_WINDOWSTART
+ l32i a3, a1, HESF_WINDOWBASE
+ wsr a2, WINDOWSTART
+ wsr a3, WINDOWBASE
+ rsync
+ // Note: register window has rotated, ie. a0..a15 clobbered.
+
+ // Reload initial stack pointer:
+ movi a1, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE // - 16
+ movi a6, XCHAL_NUM_AREGS - 8 // how many saved so far
+ addi a7, a1, -8*4
+
+ // Restore entire register file (!):
+
+1:
+ addi a14, a6, -8
+ addi a15, a7, 8*4
+ l32i a4, a15, HESF_AR(4)
+ l32i a5, a15, HESF_AR(5)
+ l32i a6, a15, HESF_AR(6)
+ l32i a7, a15, HESF_AR(7)
+ l32i a8, a15, HESF_AR(8)
+ l32i a9, a15, HESF_AR(9)
+ l32i a10,a15, HESF_AR(10)
+ l32i a11,a15, HESF_AR(11)
+ rotw 2
+ bnez a6, 1b // loop until done
+
+ l32i a4, a7, HESF_AR(12)
+ l32i a5, a7, HESF_AR(13)
+ l32i a6, a7, HESF_AR(14)
+ l32i a7, a7, HESF_AR(15)
+ rotw 2
+
+ // back to original a1 ...
+
+#else /* Call0 ABI: */
+
+ l32i a4, a1, HESF_AR(4) // restore general registers
+ l32i a5, a1, HESF_AR(5)
+ l32i a6, a1, HESF_AR(6)
+ l32i a7, a1, HESF_AR(7)
+ l32i a8, a1, HESF_AR(8)
+ l32i a9, a1, HESF_AR(9)
+ l32i a10, a1, HESF_AR(10)
+ l32i a11, a1, HESF_AR(11)
+ l32i a12, a1, HESF_AR(12)
+ l32i a13, a1, HESF_AR(13)
+ l32i a14, a1, HESF_AR(14)
+ l32i a15, a1, HESF_AR(15)
+
+#endif /* __XTENSA_WINDOWED_ABI__ */
+
+ // Restore exception state:
+ l32i a2, a1, HESF_EPC1
+ l32i a3, a1, HESF_EXCCAUSE
+ wsr a2, EPC1
+ wsr a3, EXCCAUSE
+#if !XCHAL_HAVE_XEA1
+ l32i a2, a1, HESF_EXCVADDR
+ wsr a2, EXCVADDR
+#endif
+ l32i a3, a1, HESF_EXCSAVE1
+ wsr a3, EXCSAVE1
+ l32i a2, a1, HESF_EPC3
+ wsr a2, EPC3
+ l32i a3, a1, HESF_EPS3
+ wsr a3, EPS3
+
+ l32i a0, a1, HESF_AR(0)
+
+ /* Re-Open NMI */
+
+ rsr a3, SAR
+ movi a2, 0x3ff
+ slli a2, a2, 20 // a2 = 0x3FF00000
+ wsr a3, SAR
+ rsync
+ movi a3, 1
+ s32i a3, a2, 0 // write 1 to re-open the NMI
+
+ l32i a2, a1, HESF_AR(2)
+ l32i a3, a1, HESF_AR(3)
+ l32i a1, a1, HESF_AR(1)
+
+nmi_rfi:
+ rfi XCHAL_NMILEVEL
+
+#if 0
+ .align 4
+.L_xt_nmi_exit:
+ rsr a0, EXCSAVE + XCHAL_NMILEVEL /* restore a0 */
+ rfi XCHAL_NMILEVEL
+#endif
+#endif /* NMI */
+
+
+
+#endif
+