arch: xtensa: fix nxstyle errors

Fix the errors reported by the nxstyle tool.

Signed-off-by: Alin Jerpelea <alin.jerpelea@sony.com>
Authored by Alin Jerpelea on 2021-04-07 13:59:24 +02:00; committed by Xiang Xiao
parent 55ca83cbc9
commit cb2ecefbf1
6 changed files with 481 additions and 414 deletions


@ -31,6 +31,7 @@
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option
* is configured, and a value of 0 otherwise. These macros are always
* defined.
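Because these option macros are always defined with a value of 0 or 1, they are meant to be tested with #if rather than #ifdef. A minimal illustrative sketch (not part of this change; the S32C1I option is just an example):

#if XCHAL_HAVE_S32C1I
  /* The S32C1I compare-and-swap instruction is configured; atomic
   * sequences can be built on it.
   */
#else
  /* No S32C1I; fall back to e.g. an interrupt-masked sequence. */
#endif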
@ -64,8 +65,11 @@
#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
#define XCHAL_HAVE_ABS 1 /* ABS instruction */
/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
/* #define XCHAL_HAVE_POPC 0 POPC instruction */
/* #define XCHAL_HAVE_CRC 0 CRC instruction */
#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
#define XCHAL_HAVE_SPECULATION 0 /* speculation */
@ -121,6 +125,7 @@
#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */
#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/
#define XCHAL_HAVE_DFP_ACCEL 1 /* double precision FP acceleration pkg */
#define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */
#define XCHAL_HAVE_DFPU_SINGLE_ONLY 1 /* DFPU Coprocessor, single precision only */
@ -155,7 +160,11 @@
* (1 = 5-stage, 2 = 7-stage) */
#define XCHAL_CLOCK_GATING_GLOBAL 1 /* global clock gating */
#define XCHAL_CLOCK_GATING_FUNCUNIT 1 /* funct. unit clock gating */
/* In T1050, applies to selected core load and store instructions (see ISA): */
/* In T1050,
* applies to selected core load and store instructions (see ISA):
*/
#define XCHAL_UNALIGNED_LOAD_EXCEPTION 0 /* unaligned loads cause exc. */
#define XCHAL_UNALIGNED_STORE_EXCEPTION 0 /* unaligned stores cause exc.*/
#define XCHAL_UNALIGNED_LOAD_HW 1 /* unaligned loads work in hw */
@ -166,25 +175,27 @@
#define XCHAL_CORE_ID "esp32_v3_49_prod"
/* alphanum core name
* (CoreID) set in the Xtensa
* Processor Generator */
* Processor Generator
*/
#define XCHAL_BUILD_UNIQUE_ID 0x0005fe96 /* 22-bit sw build ID */
/*
* These definitions describe the hardware targeted by this software.
*/
/* These definitions describe the hardware targeted by this software. */
#define XCHAL_HW_CONFIGID0 0xc2bcfffe /* ConfigID hi 32 bits*/
#define XCHAL_HW_CONFIGID1 0x1cc5fe96 /* ConfigID lo 32 bits*/
#define XCHAL_HW_VERSION_NAME "LX6.0.3" /* full version name */
#define XCHAL_HW_VERSION_MAJOR 2600 /* major ver# of targeted hw */
#define XCHAL_HW_VERSION_MINOR 3 /* minor ver# of targeted hw */
#define XCHAL_HW_VERSION 260003 /* major*100+minor */
#define XCHAL_HW_VERSION_MAJOR 2600 /* major ver# of targeted hw */
#define XCHAL_HW_VERSION_MINOR 3 /* minor ver# of targeted hw */
#define XCHAL_HW_VERSION 260003 /* major*100+minor */
#define XCHAL_HW_REL_LX6 1
#define XCHAL_HW_REL_LX6_0 1
#define XCHAL_HW_REL_LX6_0_3 1
#define XCHAL_HW_CONFIGID_RELIABLE 1
/* If software targets a *range* of hardware versions, these are the bounds: */
/* If software targets a *range* of hardware versions,
* these are the bounds:
*/
#define XCHAL_HW_MIN_VERSION_MAJOR 2600 /* major v of earliest tgt hw */
#define XCHAL_HW_MIN_VERSION_MINOR 3 /* minor v of earliest tgt hw */
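For illustration (not part of this change): XCHAL_HW_VERSION is major*100 + minor, i.e. 2600*100 + 3 = 260003 for this configuration, so version-dependent code can compare against it directly:

#if XCHAL_HW_VERSION >= 260003
  /* Targeted hardware is at least LX6.0.3 */
#endif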
@ -273,6 +284,7 @@
#define XCHAL_INSTROM0_VADDR 0x40800000 /* virtual address */
#define XCHAL_INSTROM0_PADDR 0x40800000 /* physical address */
#define XCHAL_INSTROM0_SIZE 4194304 /* size in bytes */
#define XCHAL_INSTROM0_ECC_PARITY 0 /* ECC/parity type, 0=none */
/* Instruction RAM 0: */
@ -280,6 +292,7 @@
#define XCHAL_INSTRAM0_VADDR 0x40000000 /* virtual address */
#define XCHAL_INSTRAM0_PADDR 0x40000000 /* physical address */
#define XCHAL_INSTRAM0_SIZE 4194304 /* size in bytes */
#define XCHAL_INSTRAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */
/* Instruction RAM 1: */
@ -287,6 +300,7 @@
#define XCHAL_INSTRAM1_VADDR 0x40400000 /* virtual address */
#define XCHAL_INSTRAM1_PADDR 0x40400000 /* physical address */
#define XCHAL_INSTRAM1_SIZE 4194304 /* size in bytes */
#define XCHAL_INSTRAM1_ECC_PARITY 0 /* ECC/parity type, 0=none */
/* Data ROM 0: */
@ -294,6 +308,7 @@
#define XCHAL_DATAROM0_VADDR 0x3F400000 /* virtual address */
#define XCHAL_DATAROM0_PADDR 0x3F400000 /* physical address */
#define XCHAL_DATAROM0_SIZE 4194304 /* size in bytes */
#define XCHAL_DATAROM0_ECC_PARITY 0 /* ECC/parity type, 0=none */
#define XCHAL_DATAROM0_BANKS 1 /* number of banks */
@ -301,6 +316,7 @@
#define XCHAL_DATARAM0_VADDR 0x3FF80000 /* virtual address */
#define XCHAL_DATARAM0_PADDR 0x3FF80000 /* physical address */
#define XCHAL_DATARAM0_SIZE 524288 /* size in bytes */
#define XCHAL_DATARAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */
#define XCHAL_DATARAM0_BANKS 1 /* number of banks */
@ -309,6 +325,7 @@
#define XCHAL_DATARAM1_VADDR 0x3F800000 /* virtual address */
#define XCHAL_DATARAM1_PADDR 0x3F800000 /* physical address */
#define XCHAL_DATARAM1_SIZE 4194304 /* size in bytes */
#define XCHAL_DATARAM1_ECC_PARITY 0 /* ECC/parity type, 0=none */
#define XCHAL_DATARAM1_BANKS 1 /* number of banks */
@ -316,9 +333,9 @@
#define XCHAL_XLMI0_VADDR 0x3FF00000 /* virtual address */
#define XCHAL_XLMI0_PADDR 0x3FF00000 /* physical address */
#define XCHAL_XLMI0_SIZE 524288 /* size in bytes */
#define XCHAL_XLMI0_ECC_PARITY 0 /* ECC/parity type, 0=none */
#define XCHAL_XLMI0_SIZE 524288 /* size in bytes */
#define XCHAL_XLMI0_ECC_PARITY 0 /* ECC/parity type, 0=none */
#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
/* Interrupts and Timers ****************************************************/
@ -332,9 +349,11 @@
#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
#define XCHAL_NUM_EXTINTERRUPTS 26 /* num of external interrupts */
#define XCHAL_INT_NLEVELS 6 /* number of interrupt levels
(not including level zero) */
* (not including level zero) */
#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
/* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
/* (always 1 in XEA1;
* levels 2 .. EXCM_LEVEL are
* "medium priority") */
/* Masks of interrupts at each interrupt level: */
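As an aside (illustrative sketch, not part of this change): XCHAL_EXCM_LEVEL above is the level typically passed to the RSIL instruction to mask low- and medium-priority interrupts around a critical section; the register choice here is arbitrary:

  rsil    a2, XCHAL_EXCM_LEVEL    /* a2 <- old PS; mask levels 1..3 */
  /* ... critical section ... */
  wsr     a2, PS                  /* restore the previous PS */
  rsync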
@ -494,7 +513,9 @@
#define XCHAL_EXTINT23_NUM 28 /* (intlevel 4) */
#define XCHAL_EXTINT24_NUM 30 /* (intlevel 4) */
#define XCHAL_EXTINT25_NUM 31 /* (intlevel 5) */
/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
@ -537,6 +558,7 @@
#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
#define XCHAL_VECBASE_RESET_VADDR 0x40000000 /* VECBASE reset value */
#define XCHAL_VECBASE_RESET_PADDR 0x40000000
#define XCHAL_RESET_VECBASE_OVERLAP 0
@ -629,6 +651,7 @@
#define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table
* [autorefill] and protection)
* usable for an MMU-based OS */
/* If none of the above last 4 are set, it's a custom TLB configuration. */
#define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */


@ -6,7 +6,8 @@
* macros, etc.) for this specific Xtensa processor's TIE extensions
* and options. It is customized to this Xtensa processor configuration.
*
* Customer ID=11657; Build=0x5fe96; Copyright (c) 1999-2016 Cadence Design Systems Inc.
* Customer ID=11657; Build=0x5fe96;
* Copyright (c) 1999-2016 Cadence Design Systems Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -37,32 +38,32 @@
****************************************************************************/
/* Selection parameter values for save-area save/restore macros: */
/* Option vs. TIE:
*/
#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
/* Option vs. TIE: */
#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
/* Whether used automatically by compiler: */
#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
#define XTHAL_SAS_ANYCC 0x000c /* both of the above */
#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
#define XTHAL_SAS_ANYCC 0x000c /* both of the above */
/* ABI handling across function calls: */
#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
/* Misc */
#define XTHAL_SAS_ALL 0xffff /* include all default NCP contents */
#define XTHAL_SAS3(optie,ccuse,abi) (((optie) & XTHAL_SAS_ANYOT) | \
((ccuse) & XTHAL_SAS_ANYCC) | \
((abi) & XTHAL_SAS_ANYABI))
#define XTHAL_SAS_ALL 0xffff /* include all default NCP contents */
#define XTHAL_SAS3(optie,ccuse,abi) (((optie) & XTHAL_SAS_ANYOT) | \
((ccuse) & XTHAL_SAS_ANYCC) | \
((abi) & XTHAL_SAS_ANYABI))
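Worked example (not part of this change): with the values above, selecting only the optional, compiler-used, global state evaluates to

/*  XTHAL_SAS3(XTHAL_SAS_OPT, XTHAL_SAS_CC, XTHAL_SAS_GLOB)
 *    = (0x0002 & 0x0003) | (0x0008 & 0x000c) | (0x0040 & 0x0070)
 *    = 0x0002 | 0x0008 | 0x0040
 *    = 0x004a
 */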
/****************************************************************************
* Assembly Language Macros
@ -74,105 +75,109 @@
* Required parameters:
* ptr Save area pointer address register (clobbered)
* (register must contain a 4 byte aligned address).
* at1..at4 Four temporary address registers (first XTENSA_NCP_NUM_ATMPS
* registers are clobbered, the remaining are unused).
* at1..at4 Four temporary address registers (first
* XTENSA_NCP_NUM_ATMPS registers are clobbered,
* the remaining are unused).
*
* Optional parameters:
* continue If macro invoked as part of a larger store sequence, set to 1
* if this is not the first in the sequence. Defaults to 0.
* ofs Offset from start of larger sequence (from value of first ptr
* in sequence) at which to store. Defaults to next available space
* (or 0 if <continue> is 0).
* select Select what category(ies) of registers to store, as a bitmask
* (see XTHAL_SAS_xxx constants). Defaults to all registers.
* alloc Select what category(ies) of registers to allocate; if any
* category is selected here that is not in <select>, space for
* the corresponding registers is skipped without doing any store.
* continue If macro invoked as part of a larger store sequence, set
* to 1 if this is not the first in the sequence.
* Defaults to 0.
* ofs Offset from start of larger sequence (from value of first
* ptr in sequence) at which to store. Defaults to next
* available space(or 0 if <continue> is 0).
* select Select what category(ies) of registers to store, as a
* bitmask (see XTHAL_SAS_xxx constants).
* Defaults to all registers.
* alloc Select what category(ies) of registers to allocate; if
* any category is selected here that is not in <select>,
* space for the corresponding registers is skipped without
* doing any store.
*/
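Illustrative usage (not part of this change): with the defaults (select=XTHAL_SAS_ALL, alloc=0), a single invocation stores all of this state; a2..a6 below are an arbitrary register choice, and a2 is assumed to hold a 4-byte-aligned save-area pointer:

  xchal_ncp_store  a2, a3, a4, a5, a6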
.macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
.macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
/* Optional global registers used by default by the compiler: */
/* Optional global registers used by default by the compiler: */
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
xchal_sa_align \ptr, 0, 1016, 4, 4
rur.THREADPTR \at1 /* threadptr option */
s32i \at1, \ptr, .Lxchal_ofs_+0
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
xchal_sa_align \ptr, 0, 1016, 4, 4
rur.THREADPTR \at1 /* threadptr option */
s32i \at1, \ptr, .Lxchal_ofs_+0
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1016, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
xchal_sa_align \ptr, 0, 1016, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.endif
.endif
/* Optional caller-saved registers used by default by the compiler: */
/* Optional caller-saved registers used by default by the compiler: */
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1012, 4, 4
rsr.ACCLO \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+0
rsr.ACCHI \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
xchal_sa_align \ptr, 0, 1012, 4, 4
rsr.ACCLO \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+0
rsr.ACCHI \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1012, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
xchal_sa_align \ptr, 0, 1012, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.endif
.endif
/* Optional caller-saved registers not used by default by the compiler: */
/* Optional caller-saved registers not used by default by the compiler: */
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 996, 4, 4
rsr.BR \at1 /* boolean option */
s32i \at1, \ptr, .Lxchal_ofs_+0
rsr.SCOMPARE1 \at1 /* conditional store option */
s32i \at1, \ptr, .Lxchal_ofs_+4
rsr.M0 \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+8
rsr.M1 \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+12
rsr.M2 \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+16
rsr.M3 \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+20
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
xchal_sa_align \ptr, 0, 996, 4, 4
rsr.BR \at1 /* boolean option */
s32i \at1, \ptr, .Lxchal_ofs_+0
rsr.SCOMPARE1 \at1 /* conditional store option */
s32i \at1, \ptr, .Lxchal_ofs_+4
rsr.M0 \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+8
rsr.M1 \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+12
rsr.M2 \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+16
rsr.M3 \at1 /* MAC16 option */
s32i \at1, \ptr, .Lxchal_ofs_+20
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 996, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
xchal_sa_align \ptr, 0, 996, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
.endif
.endif
/* Custom caller-saved registers not used by default by the compiler: */
/* Custom caller-saved registers not used by default by the compiler: */
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1008, 4, 4
rur.F64R_LO \at1 /* ureg 234 */
s32i \at1, \ptr, .Lxchal_ofs_+0
rur.F64R_HI \at1 /* ureg 235 */
s32i \at1, \ptr, .Lxchal_ofs_+4
rur.F64S \at1 /* ureg 236 */
s32i \at1, \ptr, .Lxchal_ofs_+8
.set .Lxchal_ofs_, .Lxchal_ofs_ + 12
xchal_sa_align \ptr, 0, 1008, 4, 4
rur.F64R_LO \at1 /* ureg 234 */
s32i \at1, \ptr, .Lxchal_ofs_+0
rur.F64R_HI \at1 /* ureg 235 */
s32i \at1, \ptr, .Lxchal_ofs_+4
rur.F64S \at1 /* ureg 236 */
s32i \at1, \ptr, .Lxchal_ofs_+8
.set .Lxchal_ofs_, .Lxchal_ofs_ + 12
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1008, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 12
xchal_sa_align \ptr, 0, 1008, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 12
.endif
.endm /* xchal_ncp_store */
.endif
.endm /* xchal_ncp_store */
/* Macro to load all non-coprocessor (extra) custom TIE and optional state
* (not including zero-overhead loop registers).
@ -196,91 +201,91 @@
* the corresponding registers is skipped without doing any load.
*/
.macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
.macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
/* Optional global registers used by default by the compiler: */
/* Optional global registers used by default by the compiler: */
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
xchal_sa_align \ptr, 0, 1016, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wur.THREADPTR \at1 /* threadptr option */
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
xchal_sa_align \ptr, 0, 1016, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wur.THREADPTR \at1 /* threadptr option */
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1016, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
xchal_sa_align \ptr, 0, 1016, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.endif
.endif
/* Optional caller-saved registers used by default by the compiler: */
/* Optional caller-saved registers used by default by the compiler: */
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1012, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wsr.ACCLO \at1 /* MAC16 option */
l32i \at1, \ptr, .Lxchal_ofs_+4
wsr.ACCHI \at1 /* MAC16 option */
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
xchal_sa_align \ptr, 0, 1012, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wsr.ACCLO \at1 /* MAC16 option */
l32i \at1, \ptr, .Lxchal_ofs_+4
wsr.ACCHI \at1 /* MAC16 option */
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1012, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
xchal_sa_align \ptr, 0, 1012, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.endif
.endif
/* Optional caller-saved registers not used by default by the compiler: */
/* Optional caller-saved registers not used by default by the compiler: */
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 996, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wsr.BR \at1 /* boolean option */
l32i \at1, \ptr, .Lxchal_ofs_+4
wsr.SCOMPARE1 \at1 /* conditional store option */
l32i \at1, \ptr, .Lxchal_ofs_+8
wsr.M0 \at1 /* MAC16 option */
l32i \at1, \ptr, .Lxchal_ofs_+12
wsr.M1 \at1 /* MAC16 option */
l32i \at1, \ptr, .Lxchal_ofs_+16
wsr.M2 \at1 /* MAC16 option */
l32i \at1, \ptr, .Lxchal_ofs_+20
wsr.M3 \at1 /* MAC16 option */
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
xchal_sa_align \ptr, 0, 996, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wsr.BR \at1 /* boolean option */
l32i \at1, \ptr, .Lxchal_ofs_+4
wsr.SCOMPARE1 \at1 /* conditional store option */
l32i \at1, \ptr, .Lxchal_ofs_+8
wsr.M0 \at1 /* MAC16 option */
l32i \at1, \ptr, .Lxchal_ofs_+12
wsr.M1 \at1 /* MAC16 option */
l32i \at1, \ptr, .Lxchal_ofs_+16
wsr.M2 \at1 /* MAC16 option */
l32i \at1, \ptr, .Lxchal_ofs_+20
wsr.M3 \at1 /* MAC16 option */
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 996, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
xchal_sa_align \ptr, 0, 996, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
.endif
.endif
/* Custom caller-saved registers not used by default by the compiler: */
/* Custom caller-saved registers not used by default by the compiler: */
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1008, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wur.F64R_LO \at1 /* ureg 234 */
l32i \at1, \ptr, .Lxchal_ofs_+4
wur.F64R_HI \at1 /* ureg 235 */
l32i \at1, \ptr, .Lxchal_ofs_+8
wur.F64S \at1 /* ureg 236 */
.set .Lxchal_ofs_, .Lxchal_ofs_ + 12
xchal_sa_align \ptr, 0, 1008, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wur.F64R_LO \at1 /* ureg 234 */
l32i \at1, \ptr, .Lxchal_ofs_+4
wur.F64R_HI \at1 /* ureg 235 */
l32i \at1, \ptr, .Lxchal_ofs_+8
wur.F64S \at1 /* ureg 236 */
.set .Lxchal_ofs_, .Lxchal_ofs_ + 12
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1008, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 12
xchal_sa_align \ptr, 0, 1008, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 12
.endif
.endm /* xchal_ncp_load */
.endif
.endm /* xchal_ncp_load */
#define XTENSA_NCP_NUM_ATMPS 1
#define XTENSA_NCP_NUM_ATMPS 1
/* Macro to store the state of TIE coprocessor FPU.
* Required parameters:
@ -291,44 +296,44 @@
* Optional parameters are the same as for xchal_ncp_store.
*/
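For illustration (not part of this change): the invocation pattern is the same as for xchal_ncp_store; the xchal_cp0_store_a2 convenience alias defined in xtensa_core.h (also touched by this commit) is this invocation with a2..a6:

  xchal_cp0_store  a2, a3, a4, a5, a6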
#define xchal_cp_FPU_store xchal_cp0_store
.macro xchal_cp0_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
#define xchal_cp_FPU_store xchal_cp0_store
.macro xchal_cp0_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
/* Custom caller-saved registers not used by default by the compiler: */
/* Custom caller-saved registers not used by default by the compiler: */
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 948, 4, 4
rur.FCR \at1 /* ureg 232 */
s32i \at1, \ptr, .Lxchal_ofs_+0
rur.FSR \at1 /* ureg 233 */
s32i \at1, \ptr, .Lxchal_ofs_+4
ssi f0, \ptr, .Lxchal_ofs_+8
ssi f1, \ptr, .Lxchal_ofs_+12
ssi f2, \ptr, .Lxchal_ofs_+16
ssi f3, \ptr, .Lxchal_ofs_+20
ssi f4, \ptr, .Lxchal_ofs_+24
ssi f5, \ptr, .Lxchal_ofs_+28
ssi f6, \ptr, .Lxchal_ofs_+32
ssi f7, \ptr, .Lxchal_ofs_+36
ssi f8, \ptr, .Lxchal_ofs_+40
ssi f9, \ptr, .Lxchal_ofs_+44
ssi f10, \ptr, .Lxchal_ofs_+48
ssi f11, \ptr, .Lxchal_ofs_+52
ssi f12, \ptr, .Lxchal_ofs_+56
ssi f13, \ptr, .Lxchal_ofs_+60
ssi f14, \ptr, .Lxchal_ofs_+64
ssi f15, \ptr, .Lxchal_ofs_+68
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
xchal_sa_align \ptr, 0, 948, 4, 4
rur.FCR \at1 /* ureg 232 */
s32i \at1, \ptr, .Lxchal_ofs_+0
rur.FSR \at1 /* ureg 233 */
s32i \at1, \ptr, .Lxchal_ofs_+4
ssi f0, \ptr, .Lxchal_ofs_+8
ssi f1, \ptr, .Lxchal_ofs_+12
ssi f2, \ptr, .Lxchal_ofs_+16
ssi f3, \ptr, .Lxchal_ofs_+20
ssi f4, \ptr, .Lxchal_ofs_+24
ssi f5, \ptr, .Lxchal_ofs_+28
ssi f6, \ptr, .Lxchal_ofs_+32
ssi f7, \ptr, .Lxchal_ofs_+36
ssi f8, \ptr, .Lxchal_ofs_+40
ssi f9, \ptr, .Lxchal_ofs_+44
ssi f10, \ptr, .Lxchal_ofs_+48
ssi f11, \ptr, .Lxchal_ofs_+52
ssi f12, \ptr, .Lxchal_ofs_+56
ssi f13, \ptr, .Lxchal_ofs_+60
ssi f14, \ptr, .Lxchal_ofs_+64
ssi f15, \ptr, .Lxchal_ofs_+68
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 948, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
xchal_sa_align \ptr, 0, 948, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
.endif
.endm /* xchal_cp0_store */
.endif
.endm /* xchal_cp0_store */
/* Macro to load the state of TIE coprocessor FPU.
* Required parameters:
@ -339,63 +344,63 @@
* Optional parameters are the same as for xchal_ncp_load.
*/
#define xchal_cp_FPU_load xchal_cp0_load
.macro xchal_cp0_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
#define xchal_cp_FPU_load xchal_cp0_load
.macro xchal_cp0_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
/* Custom caller-saved registers not used by default by the compiler: */
/* Custom caller-saved registers not used by default by the compiler: */
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 948, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wur.FCR \at1 /* ureg 232 */
l32i \at1, \ptr, .Lxchal_ofs_+4
wur.FSR \at1 /* ureg 233 */
lsi f0, \ptr, .Lxchal_ofs_+8
lsi f1, \ptr, .Lxchal_ofs_+12
lsi f2, \ptr, .Lxchal_ofs_+16
lsi f3, \ptr, .Lxchal_ofs_+20
lsi f4, \ptr, .Lxchal_ofs_+24
lsi f5, \ptr, .Lxchal_ofs_+28
lsi f6, \ptr, .Lxchal_ofs_+32
lsi f7, \ptr, .Lxchal_ofs_+36
lsi f8, \ptr, .Lxchal_ofs_+40
lsi f9, \ptr, .Lxchal_ofs_+44
lsi f10, \ptr, .Lxchal_ofs_+48
lsi f11, \ptr, .Lxchal_ofs_+52
lsi f12, \ptr, .Lxchal_ofs_+56
lsi f13, \ptr, .Lxchal_ofs_+60
lsi f14, \ptr, .Lxchal_ofs_+64
lsi f15, \ptr, .Lxchal_ofs_+68
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
xchal_sa_align \ptr, 0, 948, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wur.FCR \at1 /* ureg 232 */
l32i \at1, \ptr, .Lxchal_ofs_+4
wur.FSR \at1 /* ureg 233 */
lsi f0, \ptr, .Lxchal_ofs_+8
lsi f1, \ptr, .Lxchal_ofs_+12
lsi f2, \ptr, .Lxchal_ofs_+16
lsi f3, \ptr, .Lxchal_ofs_+20
lsi f4, \ptr, .Lxchal_ofs_+24
lsi f5, \ptr, .Lxchal_ofs_+28
lsi f6, \ptr, .Lxchal_ofs_+32
lsi f7, \ptr, .Lxchal_ofs_+36
lsi f8, \ptr, .Lxchal_ofs_+40
lsi f9, \ptr, .Lxchal_ofs_+44
lsi f10, \ptr, .Lxchal_ofs_+48
lsi f11, \ptr, .Lxchal_ofs_+52
lsi f12, \ptr, .Lxchal_ofs_+56
lsi f13, \ptr, .Lxchal_ofs_+60
lsi f14, \ptr, .Lxchal_ofs_+64
lsi f15, \ptr, .Lxchal_ofs_+68
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 948, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
xchal_sa_align \ptr, 0, 948, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 72
.endif
.endm /* xchal_cp0_load */
.endif
.endm /* xchal_cp0_load */
#define XTENSA_CP0_NUM_ATMPS 1
#define XTENSA_SA_NUM_ATMPS 1
#define XTENSA_CP0_NUM_ATMPS 1
#define XTENSA_SA_NUM_ATMPS 1
/* Empty macros for unconfigured coprocessors: */
/* Empty macros for unconfigured coprocessors: */
.macro xchal_cp1_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp1_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp6_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp6_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp1_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp1_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp6_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp6_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
#endif /*__ARCH_XTENSA_INCLUDE_ESP32_TIE_ASM_H*/
#endif /* __ARCH_XTENSA_INCLUDE_ESP32_TIE_ASM_H */


@ -8,7 +8,8 @@
* that extend basic Xtensa core functionality. It is customized to this
* Xtensa processor configuration.
*
* Customer ID=11657; Build=0x5fe96; Copyright (c) 1999-2016 Cadence Design Systems Inc.
* Customer ID=11657; Build=0x5fe96;
* Copyright (c) 1999-2016 Cadence Design Systems Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
@ -107,13 +108,13 @@
* To filter out certain registers, e.g. to expand only the non-global
* registers used by the compiler, you can do something like this:
*
* #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
* #define SELCC0(p...)
* #define SELCC1(abikind,p...) SELAK##abikind(p)
* #define SELAK0(p...) REG(p)
* #define SELAK1(p...) REG(p)
* #define SELAK2(p...)
* #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
* #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
* #define SELCC0(p...)
* #define SELCC1(abikind,p...) SELAK##abikind(p)
* #define SELAK0(p...) REG(p)
* #define SELAK1(p...) REG(p)
* #define SELAK2(p...)
* #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
* ...what you want to expand...
*/
@ -190,4 +191,4 @@
3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
#endif /*_ARCH_XTENSA_INCLUDE_ESP32_TIE_H*/
#endif /* _ARCH_XTENSA_INCLUDE_ESP32_TIE_H */


@ -30,12 +30,14 @@
*
****************************************************************************/
/* This header file is sometimes referred to as the "compile-time HAL" or CHAL.
* It pulls definitions tailored for a specific Xtensa processor configuration.
/* This header file is sometimes referred to as the "compile-time HAL" or
* CHAL. It pulls definitions tailored for a specific Xtensa processor
* configuration.
*
* Sources for binaries meant to be configuration-independent generally avoid
* including this file (they may use the configuration-specific HAL library).
* It is normal for the HAL library source itself to include this file.
* Sources for binaries meant to be configuration-independent generally
* avoid including this file (they may use the configuration-specific HAL
* library). It is normal for the HAL library source itself to include this
* file.
*/
#ifndef __ARCH_XTENSA_INCUDE_XTENSA_CORE_H
@ -103,7 +105,10 @@
/* Level of first high-priority
* interrupt (always 2) */
/* Note: 1 <= LOWPRI_LEVELS <= EXCM_LEVEL < DEBUGLEVEL <= NUM_INTLEVELS < NMILEVEL <= 15 */
/* Note:
* 1 <= LOWPRI_LEVELS <= EXCM_LEVEL < DEBUGLEVEL <=
* NUM_INTLEVELS < NMILEVEL <= 15
*/
/* These values are constant for existing Xtensa processor implementations: */
@ -481,7 +486,9 @@
/* Exceptions and Vectors ***************************************************/
/* For backward compatibility ONLY -- DO NOT USE (will be removed in future release): */
/* For backward compatibility ONLY -- DO NOT USE
* (will be removed in future release):
*/
#define XCHAL_HAVE_OLD_EXC_ARCH XCHAL_HAVE_XEA1 /* (DEPRECATED) 1 if
* old exception
@ -513,7 +520,9 @@
# if XCHAL_HAVE_DEBUG
# define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL)
/* This one should only get defined if the corresponding intlevel paddr macro exists: */
/* This one should only get defined if the corresponding intlevel paddr macro
* exists:
*/
# define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL_VECTOR_PADDR(XCHAL_DEBUGLEVEL)
# endif
@ -799,7 +808,9 @@
#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS
/* NOTE: constant defined this way to allow movi instead of l32r in reset code. */
/* NOTE:
* constant defined this way to allow movi instead of l32r in reset code.
*/
# define XCHAL_CACHE_MEMCTL_DEFAULT 0xFFFFFF00 /* Init all possible ways */
#else
@ -824,12 +835,14 @@
/* See <xtensa/config/core-matmap.h> for more details. */
/* Has different semantic in open source headers (where it means HAVE_PTP_MMU),
* so comment out starting with RB-2008.3 release; later, might get
* get reintroduced as a synonym for XCHAL_HAVE_PTP_MMU instead:
/* Has different semantic in open source headers (where it means
* HAVE_PTP_MMU), so comment out starting with RB-2008.3 release; later,
* might get get reintroduced as a synonym for XCHAL_HAVE_PTP_MMU instead:
*/
/* #define XCHAL_HAVE_MMU XCHAL_HAVE_TLBS*//* (DEPRECATED; use XCHAL_HAVE_TLBS instead) */
/* #define XCHAL_HAVE_MMU XCHAL_HAVE_TLBS
* (DEPRECATED; use XCHAL_HAVE_TLBS instead)
*/
/* Indexing macros: */
@ -843,7 +856,8 @@
#define XCHAL_DTLB_SET_E(n,i,what) _XCHAL_DTLB_SET_E(n,i, _ ## what )
/* Example use: XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES)
* to get the value of XCHAL_ITLB_SET<n>_ENTRIES where <n> is the first auto-refill set.
* to get the value of XCHAL_ITLB_SET<n>_ENTRIES where <n> is the first
* auto-refill set.
*/
/* Number of entries per autorefill way: */
@ -905,14 +919,14 @@
# define XCHAL_SEG_MAPPABLE_VADDR 0x00000000 /* start of largest
* non-static-mapped virtual
* addr area */
# define XCHAL_SEG_MAPPABLE_SIZE 0xD0000000 /* size in bytes of " */
# define XCHAL_SEG_MAPPABLE_SIZE 0xD0000000 /* size in bytes of */
/* define XCHAL_SEG_MAPPABLE2_xxx if more areas present, sorted in order of
* descending size.
*/
#endif
/* Misc. ****************************************************
/* Misc. ********************************************************************/
/* Data alignment required if used for instructions: */
@ -923,8 +937,9 @@
#endif
/* Names kept for backward compatibility.
* (Here "RELEASE" is now a misnomer; these are product *versions*, not the releases
* under which they are released. In the T10##.# era there was no distinction.)
* (Here "RELEASE" is now a misnomer; these are product *versions*, not the
* releases under which they are released.
* In the T10##.# era there was no distinction.)
*/
#define XCHAL_HW_RELEASE_MAJOR XCHAL_HW_VERSION_MAJOR
@ -946,15 +961,15 @@
* original ptr to next load/store location.
*/
.macro xchal_sa_start continue totofs
.ifeq \continue
.set .Lxchal_pofs_, 0 /* offset from original ptr to current \ptr */
.set .Lxchal_ofs_, 0 /* offset from current \ptr to next load/store location */
.endif
.if \totofs + 1 /* if totofs specified (not -1) */
.set .Lxchal_ofs_, \totofs - .Lxchal_pofs_ /* specific offset from original ptr */
.endif
.endm
.macro xchal_sa_start continue totofs
.ifeq \continue
.set .Lxchal_pofs_, 0 /* offset from original ptr to current \ptr */
.set .Lxchal_ofs_, 0 /* offset from current \ptr to next load/store location */
.endif
.if \totofs + 1 /* if totofs specified (not -1) */
.set .Lxchal_ofs_, \totofs - .Lxchal_pofs_ /* specific offset from original ptr */
.endif
.endm
/* Align portion of save area and bring ptr in range if necessary. Used by
* save area load/store sequences. Not usually invoked directly. Allows
@ -967,29 +982,30 @@
* load/store loc (pow of 2)
*/
.macro xchal_sa_align ptr minofs maxofs ofsalign totalign
.macro xchal_sa_align ptr minofs maxofs ofsalign totalign
/* First align where we start accessing the next register
* per \totalign relative to original ptr (i.e. start of the save area):
*/
/* First align where we start accessing the next register
* per \totalign relative to original ptr (i.e. start of the save area):
*/
.set .Lxchal_ofs_, ((.Lxchal_pofs_ + .Lxchal_ofs_ + \totalign - 1) & -\totalign) - .Lxchal_pofs_
.set .Lxchal_ofs_, ((.Lxchal_pofs_ + .Lxchal_ofs_ + \totalign - 1) & -\totalign) - .Lxchal_pofs_
/* If necessary, adjust \ptr to bring .Lxchal_ofs_ in acceptable range: */
/* If necessary, adjust \ptr to bring .Lxchal_ofs_ in acceptable range: */
.if (((\maxofs) - .Lxchal_ofs_) & 0xC0000000) | ((.Lxchal_ofs_ - (\minofs)) & 0xC0000000) | (.Lxchal_ofs_ & (\ofsalign-1))
.set .Ligmask, 0xFFFFFFFF /* TODO: optimize to addmi, per aligns and .Lxchal_ofs_ */
addi \ptr, \ptr, (.Lxchal_ofs_ & .Ligmask)
.set .Lxchal_pofs_, .Lxchal_pofs_ + (.Lxchal_ofs_ & .Ligmask)
.set .Lxchal_ofs_, (.Lxchal_ofs_ & ~.Ligmask)
.endif
.endm
.if (((\maxofs) - .Lxchal_ofs_) & 0xC0000000) | ((.Lxchal_ofs_ - (\minofs)) & 0xC0000000) | (.Lxchal_ofs_ & (\ofsalign-1))
.set .Ligmask, 0xFFFFFFFF /* TODO: optimize to addmi, per aligns and .Lxchal_ofs_ */
addi \ptr, \ptr, (.Lxchal_ofs_ & .Ligmask)
.set .Lxchal_pofs_, .Lxchal_pofs_ + (.Lxchal_ofs_ & .Ligmask)
.set .Lxchal_ofs_, (.Lxchal_ofs_ & ~.Ligmask)
.endif
.endm
/* We could optimize for addi to expand to only addmi instead of
* "addmi;addi", where possible. Here's a partial example how:
*
* .set .Lmaxmask, -(\ofsalign) & -(\totalign)
* .if (((\maxofs) + ~.Lmaxmask + 1) & 0xFFFFFF00) && ((.Lxchal_ofs_ & ~.Lmaxmask) == 0)
* .if (((\maxofs) + ~.Lmaxmask + 1) & 0xFFFFFF00) &&
* ((.Lxchal_ofs_ & ~.Lmaxmask) == 0)
* .set .Ligmask, 0xFFFFFF00
* .elif ... ditto for negative ofs range ...
* .set .Ligmask, 0xFFFFFF00
@ -997,17 +1013,17 @@
* .else
* .set .Ligmask, 0xFFFFFFFF
* .endif
*
*/
/* Invoke this after xchal_XXX_{load,store} macros to restore \ptr. */
.macro xchal_sa_ptr_restore ptr
.if .Lxchal_pofs_
addi \ptr, \ptr, - .Lxchal_pofs_
.set .Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_
.set .Lxchal_pofs_, 0
.endif
.endm
.macro xchal_sa_ptr_restore ptr
.if .Lxchal_pofs_
addi \ptr, \ptr, - .Lxchal_pofs_
.set .Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_
.set .Lxchal_pofs_, 0
.endif
.endm
/* Use as eg:
* xchal_atmps_store a1, SOMEOFS, XCHAL_SA_NUM_ATMPS, a4, a5
@ -1023,50 +1039,51 @@
#define xchal_atmps_store xchal_atmps_loadstore s32i,
#define xchal_atmps_load xchal_atmps_loadstore l32i,
.macro xchal_atmps_loadstore inst ptr offset nreq aa=0 ab=0 ac=0 ad=0
.set .Lnsaved_, 0
.irp reg,\aa,\ab,\ac,\ad
.ifeq 0x\reg ; .set .Lnsaved_,.Lnsaved_+1 ; .endif
.endr
.set .Laofs_, 0
.irp reg,\aa,\ab,\ac,\ad
.ifgt (\nreq)-.Lnsaved_
\inst \reg, \ptr, .Laofs_+\offset
.set .Laofs_,.Laofs_+4
.set .Lnsaved_,.Lnsaved_+1
.endif
.endr
.endm
.macro xchal_atmps_loadstore inst ptr offset nreq aa=0 ab=0 ac=0 ad=0
.set .Lnsaved_, 0
.irp reg,\aa,\ab,\ac,\ad
.ifeq 0x\reg ; .set .Lnsaved_,.Lnsaved_+1 ; .endif
.endr
.set .Laofs_, 0
.irp reg,\aa,\ab,\ac,\ad
.ifgt (\nreq)-.Lnsaved_
\inst \reg, \ptr, .Laofs_+\offset
.set .Laofs_,.Laofs_+4
.set .Lnsaved_,.Lnsaved_+1
.endif
.endr
.endm
/* #define xchal_ncp_load_a2 xchal_ncp_load a2,a3,a4,a5,a6 */
/* #define xchal_ncp_load_a2 xchal_ncp_load a2,a3,a4,a5,a6 */
/* #define xchal_ncp_store_a2 xchal_ncp_store a2,a3,a4,a5,a6 */
# define xchal_extratie_load xchal_ncptie_load
# define xchal_extratie_store xchal_ncptie_store
# define xchal_extratie_load_a2 xchal_ncptie_load a2,a3,a4,a5,a6
# define xchal_extratie_store_a2 xchal_ncptie_store a2,a3,a4,a5,a6
# define xchal_extra_load xchal_ncp_load
# define xchal_extra_store xchal_ncp_store
# define xchal_extra_load_a2 xchal_ncp_load a2,a3,a4,a5,a6
# define xchal_extra_store_a2 xchal_ncp_store a2,a3,a4,a5,a6
# define xchal_extra_load_funcbody xchal_ncp_load a2,a3,a4,a5,a6
# define xchal_extra_store_funcbody xchal_ncp_store a2,a3,a4,a5,a6
# define xchal_cp0_store_a2 xchal_cp0_store a2,a3,a4,a5,a6
# define xchal_cp0_load_a2 xchal_cp0_load a2,a3,a4,a5,a6
# define xchal_cp1_store_a2 xchal_cp1_store a2,a3,a4,a5,a6
# define xchal_cp1_load_a2 xchal_cp1_load a2,a3,a4,a5,a6
# define xchal_cp2_store_a2 xchal_cp2_store a2,a3,a4,a5,a6
# define xchal_cp2_load_a2 xchal_cp2_load a2,a3,a4,a5,a6
# define xchal_cp3_store_a2 xchal_cp3_store a2,a3,a4,a5,a6
# define xchal_cp3_load_a2 xchal_cp3_load a2,a3,a4,a5,a6
# define xchal_cp4_store_a2 xchal_cp4_store a2,a3,a4,a5,a6
# define xchal_cp4_load_a2 xchal_cp4_load a2,a3,a4,a5,a6
# define xchal_cp5_store_a2 xchal_cp5_store a2,a3,a4,a5,a6
# define xchal_cp5_load_a2 xchal_cp5_load a2,a3,a4,a5,a6
# define xchal_cp6_store_a2 xchal_cp6_store a2,a3,a4,a5,a6
# define xchal_cp6_load_a2 xchal_cp6_load a2,a3,a4,a5,a6
# define xchal_cp7_store_a2 xchal_cp7_store a2,a3,a4,a5,a6
# define xchal_cp7_load_a2 xchal_cp7_load a2,a3,a4,a5,a6
/* #define xchal_ncp_store_a2 xchal_ncp_store a2,a3,a4,a5,a6 */
# define xchal_extratie_load xchal_ncptie_load
# define xchal_extratie_store xchal_ncptie_store
# define xchal_extratie_load_a2 xchal_ncptie_load a2,a3,a4,a5,a6
# define xchal_extratie_store_a2 xchal_ncptie_store a2,a3,a4,a5,a6
# define xchal_extra_load xchal_ncp_load
# define xchal_extra_store xchal_ncp_store
# define xchal_extra_load_a2 xchal_ncp_load a2,a3,a4,a5,a6
# define xchal_extra_store_a2 xchal_ncp_store a2,a3,a4,a5,a6
# define xchal_extra_load_funcbody xchal_ncp_load a2,a3,a4,a5,a6
# define xchal_extra_store_funcbody xchal_ncp_store a2,a3,a4,a5,a6
# define xchal_cp0_store_a2 xchal_cp0_store a2,a3,a4,a5,a6
# define xchal_cp0_load_a2 xchal_cp0_load a2,a3,a4,a5,a6
# define xchal_cp1_store_a2 xchal_cp1_store a2,a3,a4,a5,a6
# define xchal_cp1_load_a2 xchal_cp1_load a2,a3,a4,a5,a6
# define xchal_cp2_store_a2 xchal_cp2_store a2,a3,a4,a5,a6
# define xchal_cp2_load_a2 xchal_cp2_load a2,a3,a4,a5,a6
# define xchal_cp3_store_a2 xchal_cp3_store a2,a3,a4,a5,a6
# define xchal_cp3_load_a2 xchal_cp3_load a2,a3,a4,a5,a6
# define xchal_cp4_store_a2 xchal_cp4_store a2,a3,a4,a5,a6
# define xchal_cp4_load_a2 xchal_cp4_load a2,a3,a4,a5,a6
# define xchal_cp5_store_a2 xchal_cp5_store a2,a3,a4,a5,a6
# define xchal_cp5_load_a2 xchal_cp5_load a2,a3,a4,a5,a6
# define xchal_cp6_store_a2 xchal_cp6_store a2,a3,a4,a5,a6
# define xchal_cp6_load_a2 xchal_cp6_load a2,a3,a4,a5,a6
# define xchal_cp7_store_a2 xchal_cp7_store a2,a3,a4,a5,a6
# define xchal_cp7_load_a2 xchal_cp7_load a2,a3,a4,a5,a6
/* Empty placeholder macros for undefined coprocessors: */
@ -1117,121 +1134,121 @@
* Exit: any register a2-a15 (?) may have been clobbered.
*/
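Illustrative sketch (not part of this change): the macro is meant to form the body of a small dispatch function; a minimal windowed-ABI wrapper, with a hypothetical symbol name, could look like

  .text
  .align  4
  .global _coproc_savestate       /* hypothetical name */
_coproc_savestate:                /* a2 = save area, a3 = coprocessor number */
  entry   a1, 32
  xchal_cpi_store_funcbody
  retw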
.macro xchal_cpi_store_funcbody
.macro xchal_cpi_store_funcbody
#if (XCHAL_CP_MASK & ~XCHAL_CP_PORT_MASK)
# if XCHAL_CP0_SA_SIZE
bnez a3, 99f
xchal_cp0_store_a2
j 90f
bnez a3, 99f
xchal_cp0_store_a2
j 90f
99:
# endif
# if XCHAL_CP1_SA_SIZE
bnei a3, 1, 99f
xchal_cp1_store_a2
j 90f
bnei a3, 1, 99f
xchal_cp1_store_a2
j 90f
99:
# endif
# if XCHAL_CP2_SA_SIZE
bnei a3, 2, 99f
xchal_cp2_store_a2
j 90f
bnei a3, 2, 99f
xchal_cp2_store_a2
j 90f
99:
# endif
# if XCHAL_CP3_SA_SIZE
bnei a3, 3, 99f
xchal_cp3_store_a2
j 90f
bnei a3, 3, 99f
xchal_cp3_store_a2
j 90f
99:
# endif
# if XCHAL_CP4_SA_SIZE
bnei a3, 4, 99f
xchal_cp4_store_a2
j 90f
bnei a3, 4, 99f
xchal_cp4_store_a2
j 90f
99:
# endif
# if XCHAL_CP5_SA_SIZE
bnei a3, 5, 99f
xchal_cp5_store_a2
j 90f
bnei a3, 5, 99f
xchal_cp5_store_a2
j 90f
99:
# endif
# if XCHAL_CP6_SA_SIZE
bnei a3, 6, 99f
xchal_cp6_store_a2
j 90f
bnei a3, 6, 99f
xchal_cp6_store_a2
j 90f
99:
# endif
# if XCHAL_CP7_SA_SIZE
bnei a3, 7, 99f
xchal_cp7_store_a2
j 90f
bnei a3, 7, 99f
xchal_cp7_store_a2
j 90f
99:
# endif
90:
#endif
.endm
.endm
/* Macro that expands to the body of a function that loads the selected coprocessor's state
* (registers etc).
/* Macro that expands to the body of a function that loads the selected
* coprocessor's state (registers etc).
*
* Entry: a2 = ptr to save area from which to restore cp state
* a3 = coprocessor number
* Exit: any register a2-a15 (?) may have been clobbered.
*/
.macro xchal_cpi_load_funcbody
.macro xchal_cpi_load_funcbody
#if (XCHAL_CP_MASK & ~XCHAL_CP_PORT_MASK)
# if XCHAL_CP0_SA_SIZE
bnez a3, 99f
xchal_cp0_load_a2
j 90f
bnez a3, 99f
xchal_cp0_load_a2
j 90f
99:
# endif
# if XCHAL_CP1_SA_SIZE
bnei a3, 1, 99f
xchal_cp1_load_a2
j 90f
bnei a3, 1, 99f
xchal_cp1_load_a2
j 90f
99:
# endif
# if XCHAL_CP2_SA_SIZE
bnei a3, 2, 99f
xchal_cp2_load_a2
j 90f
bnei a3, 2, 99f
xchal_cp2_load_a2
j 90f
99:
# endif
# if XCHAL_CP3_SA_SIZE
bnei a3, 3, 99f
xchal_cp3_load_a2
j 90f
bnei a3, 3, 99f
xchal_cp3_load_a2
j 90f
99:
# endif
# if XCHAL_CP4_SA_SIZE
bnei a3, 4, 99f
xchal_cp4_load_a2
j 90f
bnei a3, 4, 99f
xchal_cp4_load_a2
j 90f
99:
# endif
# if XCHAL_CP5_SA_SIZE
bnei a3, 5, 99f
xchal_cp5_load_a2
j 90f
bnei a3, 5, 99f
xchal_cp5_load_a2
j 90f
99:
# endif
# if XCHAL_CP6_SA_SIZE
bnei a3, 6, 99f
xchal_cp6_load_a2
j 90f
bnei a3, 6, 99f
xchal_cp6_load_a2
j 90f
99:
# endif
# if XCHAL_CP7_SA_SIZE
bnei a3, 7, 99f
xchal_cp7_load_a2
j 90f
bnei a3, 7, 99f
xchal_cp7_load_a2
j 90f
99:
# endif
90:
#endif
.endm
.endm
#endif /* __ASSEMBLY__ */
@ -1373,8 +1390,9 @@
/* Because information as to exactly which hardware version is targeted
* by a given software build is not always available, compile-time HAL
* Hardware-Release "_AT" macros are fuzzy (return 0, 1, or XCHAL_MAYBE):
* (Here "RELEASE" is now a misnomer; these are product *versions*, not the releases
* under which they are released. In the T10##.# era there was no distinction.)
* (Here "RELEASE" is now a misnomer; these are product *versions*, not the
* releases under which they are released. In the T10##.# era there was no
* distinction.)
*/
#if XCHAL_HW_CONFIGID_RELIABLE
@ -1396,8 +1414,10 @@
/* Specific errata: */
/* Erratum T1020.H13, T1030.H7, T1040.H10, T1050.H4 (fixed in T1040.3 and T1050.1;
* relevant only in XEA1, kernel-vector mode, level-one interrupts and overflows enabled):
/* Erratum T1020.H13, T1030.H7, T1040.H10, T1050.H4
* (fixed in T1040.3 and T1050.1;
* relevant only in XEA1, kernel-vector mode,
* level-one interrupts and overflows enabled):
*/
#define XCHAL_MAYHAVE_ERRATUM_XEA1KWIN \
@ -1414,7 +1434,7 @@
#if (XCHAL_HW_MAX_VERSION >= XTENSA_HWVERSION_RE_2013_2 && \
XCHAL_HW_MIN_VERSION <= XTENSA_HWVERSION_RF_2014_0 && \
XCHAL_ICACHE_SIZE != 0 && XCHAL_HAVE_PIF /*covers also AXI/AHB*/ && \
XCHAL_ICACHE_SIZE != 0 && XCHAL_HAVE_PIF /* covers also AXI/AHB */ && \
XCHAL_HAVE_LOOPS && XCHAL_LOOP_BUFFER_SIZE != 0 && \
XCHAL_CLOCK_GATING_GLOBAL && !defined(_NO_ERRATUM_453))
# define XCHAL_ERRATUM_453 1
@ -1440,4 +1460,4 @@
# define XCHAL_ERRATUM_497 0
#endif
#endif /*__ARCH_XTENSA_INCUDE_XTENSA_CORE_H*/
#endif /* __ARCH_XTENSA_INCUDE_XTENSA_CORE_H */


@ -46,7 +46,9 @@
#if XCHAL_CP_NUM > 0
/* Align a value up/down to nearest n-byte boundary, where n is a power of 2. */
/* Align a value up/down to nearest n-byte boundary,
* where n is a power of 2.
*/
#define _CP_MASK(n) ((n) - 1)
#define _CP_ALIGNUP(n,val) (((val) + _CP_MASK(n)) & ~_CP_MASK(n))
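Worked example (not part of this change), taking n = 16:

/*  _CP_ALIGNUP(16, 100) = (100 + 15) & ~15 = 112
 *  _CP_ALIGNUP(16, 112) = (112 + 15) & ~15 = 112  (already aligned)
 */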


@ -65,25 +65,40 @@
#define EXCCAUSE_SPECULATION 7 /* Use of Failed Speculative Access (not implemented) */
#define EXCCAUSE_PRIVILEGED 8 /* Privileged Instruction */
#define EXCCAUSE_UNALIGNED 9 /* Unaligned Load or Store */
/* Reserved 10-11 */
#define EXCCAUSE_INSTR_DATA_ERROR 12 /* PIF Data Error on Instruction Fetch (RB-200x and later) */
#define EXCCAUSE_INSTR_DATA_ERROR 12 /* PIF Data Error on Instruction Fetch (RB-200x and later) */
#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13 /* PIF Data Error on Load or Store (RB-200x and later) */
#define EXCCAUSE_INSTR_ADDR_ERROR 14 /* PIF Address Error on Instruction Fetch (RB-200x and later) */
#define EXCCAUSE_INSTR_ADDR_ERROR 14 /* PIF Address Error on Instruction Fetch (RB-200x and later) */
#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15 /* PIF Address Error on Load or Store (RB-200x and later) */
#define EXCCAUSE_ITLB_MISS 16 /* ITLB Miss (no ITLB entry matches, hw refill also missed) */
#define EXCCAUSE_ITLB_MULTIHIT 17 /* ITLB Multihit (multiple ITLB entries match) */
#define EXCCAUSE_INSTR_RING 18 /* Ring Privilege Violation on Instruction Fetch */
/* Reserved 19 *//* Size Restriction on IFetch (not implemented) */
#define EXCCAUSE_ITLB_MISS 16 /* ITLB Miss (no ITLB entry matches, hw refill also missed) */
#define EXCCAUSE_ITLB_MULTIHIT 17 /* ITLB Multihit (multiple ITLB entries match) */
#define EXCCAUSE_INSTR_RING 18 /* Ring Privilege Violation on Instruction Fetch */
/* Reserved 19 Size Restriction on IFetch
* (not implemented)
*/
#define EXCCAUSE_INSTR_PROHIBITED 20 /* Cache Attribute does not allow Instruction Fetch */
/* Reserved 21..23 */
#define EXCCAUSE_DTLB_MISS 24 /* DTLB Miss (no DTLB entry matches, hw refill also missed) */
#define EXCCAUSE_DTLB_MULTIHIT 25 /* DTLB Multihit (multiple DTLB entries match) */
#define EXCCAUSE_LOAD_STORE_RING 26 /* Ring Privilege Violation on Load or Store */
/* Reserved 27 *//* Size Restriction on Load/Store (not implemented) */
/* Reserved 27 Size Restriction on Load/Store
* (not implemented)
*/
#define EXCCAUSE_LOAD_PROHIBITED 28 /* Cache Attribute does not allow Load */
#define EXCCAUSE_STORE_PROHIBITED 29 /* Cache Attribute does not allow Store */
/* Reserved 30-31 */
#define EXCCAUSE_CP_DISABLED(n) (32+(n)) /* Access to Coprocessor 'n' when disabled */
# define EXCCAUSE_CP0_DISABLED 32 /* Access to Coprocessor 0 when disabled */
# define EXCCAUSE_CP1_DISABLED 33 /* Access to Coprocessor 1 when disabled */
# define EXCCAUSE_CP2_DISABLED 34 /* Access to Coprocessor 2 when disabled */
@ -92,6 +107,7 @@
# define EXCCAUSE_CP5_DISABLED 37 /* Access to Coprocessor 5 when disabled */
# define EXCCAUSE_CP6_DISABLED 38 /* Access to Coprocessor 6 when disabled */
# define EXCCAUSE_CP7_DISABLED 39 /* Access to Coprocessor 7 when disabled */
/* Reserved 40..63 */
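Illustrative sketch (not part of this change): an exception handler can map the coprocessor-disabled cause codes (32..39) back to a coprocessor index; a minimal helper with a hypothetical name, assuming only the constants above:

static inline int exccause_to_cp(unsigned int exccause)
{
  /* Return the coprocessor index (0..7) for a CPn-disabled exception,
   * or -1 if the cause is not a coprocessor-disabled exception.
   */

  if (exccause >= EXCCAUSE_CP0_DISABLED && exccause <= EXCCAUSE_CP7_DISABLED)
    {
      return (int)(exccause - EXCCAUSE_CP0_DISABLED);
    }

  return -1;
}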
/* PS register fields: */
@ -203,4 +219,4 @@
#define MEMCTL_DCW_CLR_MASK (MEMCTL_DCWU_CLR_MASK | MEMCTL_DCWA_CLR_MASK)
#define MEMCTL_IDCW_CLR_MASK (MEMCTL_DCW_CLR_MASK | MEMCTL_ICWU_CLR_MASK)
#endif /*__ARCH_EXTENSA_INCLUDE_XTENSA_XTENSA_COREBITS_H*/
#endif /* __ARCH_EXTENSA_INCLUDE_XTENSA_XTENSA_COREBITS_H */